| plateform (stringclasses, 1 value) | repo_name (stringlengths, 13-113) | name (stringlengths, 3-74) | ext (stringclasses, 1 value) | path (stringlengths, 12-229) | size (int64, 23-843k) | source_encoding (stringclasses, 9 values) | md5 (stringlengths, 32) | text (stringlengths, 23-843k) |
|---|---|---|---|---|---|---|---|---|
| github | minjiang/transferlearning-master | MyTCA.m | .m | transferlearning-master/code/MyTCA.m | 2,818 | utf_8 | 7aee1d32ebfb97f5974be024ce450ce1 |
function [X_src_new,X_tar_new,A] = MyTCA(X_src,X_tar,options)
% Inputs: [dim is the dimension of features]
%%% X_src:source feature matrix, ns * dim
%%% X_tar:target feature matrix, nt * dim
%%% options:option struct
% Outputs:
%%% X_src_new:transformed source feature matrix, ns * dim_new
%%% X_tar_new:transformed target feature matrix, nt * dim_new
%%% A: adaptation matrix, (ns + nt) * (ns + nt)
%% Set options
lambda = options.lambda; %% lambda for the regularization
dim = options.dim; %% dim is the dimension after adaptation
kernel_type = options.kernel_type; %% kernel_type is the kernel name, primal|linear|rbf
gamma = options.gamma; %% gamma is the bandwidth of rbf kernel
%% Calculate
X = [X_src',X_tar'];
X = X*diag(sparse(1./sqrt(sum(X.^2))));
[m,n] = size(X);
ns = size(X_src,1);
nt = size(X_tar,1);
e = [1/ns*ones(ns,1);-1/nt*ones(nt,1)];
M = e * e';
M = M / norm(M,'fro');
H = eye(n)-1/(n)*ones(n,n);
if strcmp(kernel_type,'primal')
[A,~] = eigs(X*M*X'+lambda*eye(m),X*H*X',dim,'SM');
Z = A' * X;
Z = Z * diag(sparse(1./sqrt(sum(Z.^2))));
X_src_new = Z(:,1:ns)';
X_tar_new = Z(:,ns+1:end)';
else
K = TCA_kernel(kernel_type,X,[],gamma);
[A,~] = eigs(K*M*K'+lambda*eye(n),K*H*K',dim,'SM');
Z = A' * K;
Z = Z*diag(sparse(1./sqrt(sum(Z.^2))));
X_src_new = Z(:,1:ns)';
X_tar_new = Z(:,ns+1:end)';
end
end
% With Fast Computation of the RBF kernel matrix
% To speed up the computation, we exploit a decomposition of the Euclidean distance (norm)
%
% Inputs:
% ker: 'linear','rbf','sam'
% X: data matrix (features * samples)
% gamma: bandwidth of the RBF/SAM kernel
% Output:
% K: kernel matrix
%
% Gustavo Camps-Valls
% 2006(c)
% Jordi ([email protected]), 2007
% 2007-11: if/then -> switch, and fixed RBF kernel
% Modified by Mingsheng Long
% 2013(c)
% Mingsheng Long ([email protected]), 2013
function K = TCA_kernel(ker,X,X2,gamma)
switch ker
case 'linear'
if isempty(X2)
K = X'*X;
else
K = X'*X2;
end
case 'rbf'
n1sq = sum(X.^2,1);
n1 = size(X,2);
if isempty(X2)
D = (ones(n1,1)*n1sq)' + ones(n1,1)*n1sq -2*X'*X;
else
n2sq = sum(X2.^2,1);
n2 = size(X2,2);
D = (ones(n2,1)*n1sq)' + ones(n1,1)*n2sq -2*X'*X2;
end
K = exp(-gamma*D);
case 'sam'
if isempty(X2)
D = X'*X;
else
D = X'*X2;
end
K = exp(-gamma*acos(D).^2);
otherwise
error(['Unsupported kernel ' ker])
end
end
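% --- Usage sketch (not part of the original file; the parameter values below
% are illustrative assumptions, not recommended settings) ---
% Given a source matrix Xs (ns x dim) and a target matrix Xt (nt x dim):
%   options.lambda      = 1;        % regularization weight
%   options.dim         = 30;       % dimensionality after adaptation
%   options.kernel_type = 'primal'; % 'primal' | 'linear' | 'rbf'
%   options.gamma       = 1;        % bandwidth, only used by the 'rbf' kernel
%   [Xs_new, Xt_new, A] = MyTCA(Xs, Xt, options);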
| github | minjiang/transferlearning-master | lapgraph.m | .m | transferlearning-master/code/MyARTL/lapgraph.m | 20,244 | utf_8 | cfed436191fe6a863089f6da80644260 |
function [W, elapse] = lapgraph(fea,options)
% Usage:
% W = lapgraph(fea,options)
%
% fea: Rows of vectors of data points. Each row is x_i
% options: Struct value in Matlab. The fields in options that can be set:
% Metric - Choices are:
% 'Euclidean' - Will use the Euclidean distance of two data
% points to evaluate the "closeness" between
% them. [Default One]
% 'Cosine' - Will use the cosine value of two vectors
% to evaluate the "closeness" between them.
% A popular similarity measure used in
% Information Retrieval.
%
% NeighborMode - Indicates how to construct the graph. Choices
% are: [Default 'KNN']
% 'KNN' - k = 0
% Complete graph
% k > 0
% Put an edge between two nodes if and
% only if they are among the k nearest
% neighbors of each other. You are
% required to provide the parameter k in
% the options. Default k=5.
% 'Supervised' - k = 0
% Put an edge between two nodes if and
% only if they belong to same class.
% k > 0
% Put an edge between two nodes if
% they belong to same class and they
% are among the k nearest neighbors of
% each other.
% Default: k=0
% You are required to provide the label
% information gnd in the options.
%
% WeightMode - Indicates how to assign weights for each edge
% in the graph. Choices are:
% 'Binary' - 0-1 weighting. Every edge receives weight
% of 1. [Default One]
% 'HeatKernel' - If nodes i and j are connected, put weight
% W_ij = exp(-norm(x_i - x_j)/2t^2). This
% weight mode can only be used under
% 'Euclidean' metric and you are required to
% provide the parameter t.
% 'Cosine' - If nodes i and j are connected, put weight
% cosine(x_i,x_j). Can only be used under
% 'Cosine' metric.
%
% k - The parameter needed under 'KNN' NeighborMode.
% Default will be 5.
% gnd - The parameter needed under 'Supervised'
% NeighborMode. Column vector of the label
% information for each data point.
% bLDA - 0 or 1. Only effective under 'Supervised'
% NeighborMode. If 1, the graph will be constructed
% to make LPP exactly same as LDA. Default will be
% 0.
% t - The parameter needed under 'HeatKernel'
% WeightMode. Default will be 1
% bNormalized - 0 or 1. Only effective under 'Cosine' metric.
% Indicates whether fea has already been
% normalized to unit norm. Default will be 0
% bSelfConnected - 0 or 1. Indicates whether W(i,i) == 1.
% If NeighborMode is 'Supervised' and bLDA == 1,
% bSelfConnected will always be 1. Default will be 1.
%
%
% Examples:
%
% fea = rand(50,15);
% options = [];
% options.Metric = 'Euclidean';
% options.NeighborMode = 'KNN';
% options.k = 5;
% options.WeightMode = 'HeatKernel';
% options.t = 1;
% W = lapgraph(fea,options);
%
%
% fea = rand(50,15);
% gnd = [ones(10,1);ones(15,1)*2;ones(10,1)*3;ones(15,1)*4];
% options = [];
% options.Metric = 'Euclidean';
% options.NeighborMode = 'Supervised';
% options.gnd = gnd;
% options.WeightMode = 'HeatKernel';
% options.t = 1;
% W = lapgraph(fea,options);
%
%
% fea = rand(50,15);
% gnd = [ones(10,1);ones(15,1)*2;ones(10,1)*3;ones(15,1)*4];
% options = [];
% options.Metric = 'Euclidean';
% options.NeighborMode = 'Supervised';
% options.gnd = gnd;
% options.bLDA = 1;
% W = lapgraph(fea,options);
%
%
% For more details about the different ways to construct the W, please
% refer:
% Deng Cai, Xiaofei He and Jiawei Han, "Document Clustering Using
% Locality Preserving Indexing" IEEE TKDE, Dec. 2005.
%
%
% Written by Deng Cai (dengcai2 AT cs.uiuc.edu), April/2004, Feb/2006,
% May/2007
%
if (~exist('options','var'))
options = [];
else
if ~isstruct(options)
error('parameter error!');
end
end
%=================================================
if ~isfield(options,'Metric')
options.Metric = 'Cosine';
end
switch lower(options.Metric)
case {lower('Euclidean')}
case {lower('Cosine')}
if ~isfield(options,'bNormalized')
options.bNormalized = 0;
end
otherwise
error('Metric does not exist!');
end
%=================================================
if ~isfield(options,'NeighborMode')
options.NeighborMode = 'KNN';
end
switch lower(options.NeighborMode)
case {lower('KNN')} %For simplicity, we include the data point itself in the kNN
if ~isfield(options,'k')
options.k = 5;
end
case {lower('Supervised')}
if ~isfield(options,'bLDA')
options.bLDA = 0;
end
if options.bLDA
options.bSelfConnected = 1;
end
if ~isfield(options,'k')
options.k = 0;
end
if ~isfield(options,'gnd')
error('Label(gnd) should be provided under ''Supervised'' NeighborMode!');
end
if ~isempty(fea) && length(options.gnd) ~= size(fea,1)
error('gnd doesn''t match with fea!');
end
otherwise
error('NeighborMode does not exist!');
end
%=================================================
if ~isfield(options,'WeightMode')
options.WeightMode = 'Binary';
end
bBinary = 0;
switch lower(options.WeightMode)
case {lower('Binary')}
bBinary = 1;
case {lower('HeatKernel')}
if ~strcmpi(options.Metric,'Euclidean')
warning('''HeatKernel'' WeightMode should be used under ''Euclidean'' Metric!');
options.Metric = 'Euclidean';
end
if ~isfield(options,'t')
options.t = 1;
end
case {lower('Cosine')}
if ~strcmpi(options.Metric,'Cosine')
warning('''Cosine'' WeightMode should be used under ''Cosine'' Metric!');
options.Metric = 'Cosine';
end
if ~isfield(options,'bNormalized')
options.bNormalized = 0;
end
otherwise
error('WeightMode does not exist!');
end
%=================================================
if ~isfield(options,'bSelfConnected')
options.bSelfConnected = 1;
end
%=================================================
tmp_T = cputime;
if isfield(options,'gnd')
nSmp = length(options.gnd);
else
nSmp = size(fea,1);
end
maxM = 62500000; %500M
BlockSize = floor(maxM/(nSmp*3));
if strcmpi(options.NeighborMode,'Supervised')
Label = unique(options.gnd);
nLabel = length(Label);
if options.bLDA
G = zeros(nSmp,nSmp);
for idx=1:nLabel
classIdx = options.gnd==Label(idx);
G(classIdx,classIdx) = 1/sum(classIdx);
end
W = sparse(G);
elapse = cputime - tmp_T;
return;
end
switch lower(options.WeightMode)
case {lower('Binary')}
if options.k > 0
G = zeros(nSmp*(options.k+1),3);
idNow = 0;
for i=1:nLabel
classIdx = find(options.gnd==Label(i));
D = EuDist2(fea(classIdx,:),[],0);
[dump idx] = sort(D,2); % sort each row
clear D dump;
idx = idx(:,1:options.k+1);
nSmpClass = length(classIdx)*(options.k+1);
G(idNow+1:nSmpClass+idNow,1) = repmat(classIdx,[options.k+1,1]);
G(idNow+1:nSmpClass+idNow,2) = classIdx(idx(:));
G(idNow+1:nSmpClass+idNow,3) = 1;
idNow = idNow+nSmpClass;
clear idx
end
G = sparse(G(:,1),G(:,2),G(:,3),nSmp,nSmp);
G = max(G,G');
else
G = zeros(nSmp,nSmp);
for i=1:nLabel
classIdx = find(options.gnd==Label(i));
G(classIdx,classIdx) = 1;
end
end
if ~options.bSelfConnected
for i=1:size(G,1)
G(i,i) = 0;
end
end
W = sparse(G);
case {lower('HeatKernel')}
if options.k > 0
G = zeros(nSmp*(options.k+1),3);
idNow = 0;
for i=1:nLabel
classIdx = find(options.gnd==Label(i));
D = EuDist2(fea(classIdx,:),[],0);
[dump idx] = sort(D,2); % sort each row
clear D;
idx = idx(:,1:options.k+1);
dump = dump(:,1:options.k+1);
dump = exp(-dump/(2*options.t^2));
nSmpClass = length(classIdx)*(options.k+1);
G(idNow+1:nSmpClass+idNow,1) = repmat(classIdx,[options.k+1,1]);
G(idNow+1:nSmpClass+idNow,2) = classIdx(idx(:));
G(idNow+1:nSmpClass+idNow,3) = dump(:);
idNow = idNow+nSmpClass;
clear dump idx
end
G = sparse(G(:,1),G(:,2),G(:,3),nSmp,nSmp);
else
G = zeros(nSmp,nSmp);
for i=1:nLabel
classIdx = find(options.gnd==Label(i));
D = EuDist2(fea(classIdx,:),[],0);
D = exp(-D/(2*options.t^2));
G(classIdx,classIdx) = D;
end
end
if ~options.bSelfConnected
for i=1:size(G,1)
G(i,i) = 0;
end
end
W = sparse(max(G,G'));
case {lower('Cosine')}
if ~options.bNormalized
[nSmp, nFea] = size(fea);
if issparse(fea)
fea2 = fea';
feaNorm = sum(fea2.^2,1).^.5;
for i = 1:nSmp
fea2(:,i) = fea2(:,i) ./ max(1e-10,feaNorm(i));
end
fea = fea2';
clear fea2;
else
feaNorm = sum(fea.^2,2).^.5;
for i = 1:nSmp
fea(i,:) = fea(i,:) ./ max(1e-12,feaNorm(i));
end
end
end
if options.k > 0
G = zeros(nSmp*(options.k+1),3);
idNow = 0;
for i=1:nLabel
classIdx = find(options.gnd==Label(i));
D = fea(classIdx,:)*fea(classIdx,:)';
[dump idx] = sort(-D,2); % sort each row
clear D;
idx = idx(:,1:options.k+1);
dump = -dump(:,1:options.k+1);
nSmpClass = length(classIdx)*(options.k+1);
G(idNow+1:nSmpClass+idNow,1) = repmat(classIdx,[options.k+1,1]);
G(idNow+1:nSmpClass+idNow,2) = classIdx(idx(:));
G(idNow+1:nSmpClass+idNow,3) = dump(:);
idNow = idNow+nSmpClass;
clear dump idx
end
G = sparse(G(:,1),G(:,2),G(:,3),nSmp,nSmp);
else
G = zeros(nSmp,nSmp);
for i=1:nLabel
classIdx = find(options.gnd==Label(i));
G(classIdx,classIdx) = fea(classIdx,:)*fea(classIdx,:)';
end
end
if ~options.bSelfConnected
for i=1:size(G,1)
G(i,i) = 0;
end
end
W = sparse(max(G,G'));
otherwise
error('WeightMode does not exist!');
end
elapse = cputime - tmp_T;
return;
end
if strcmpi(options.NeighborMode,'KNN') && (options.k > 0)
if strcmpi(options.Metric,'Euclidean')
G = zeros(nSmp*(options.k+1),3);
for i = 1:ceil(nSmp/BlockSize)
if i == ceil(nSmp/BlockSize)
smpIdx = (i-1)*BlockSize+1:nSmp;
dist = EuDist2(fea(smpIdx,:),fea,0);
dist = full(dist);
[dump idx] = sort(dist,2); % sort each row
idx = idx(:,1:options.k+1);
dump = dump(:,1:options.k+1);
if ~bBinary
dump = exp(-dump/(2*options.t^2));
end
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),1) = repmat(smpIdx',[options.k+1,1]);
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),2) = idx(:);
if ~bBinary
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),3) = dump(:);
else
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),3) = 1;
end
else
smpIdx = (i-1)*BlockSize+1:i*BlockSize;
dist = EuDist2(fea(smpIdx,:),fea,0);
dist = full(dist);
[dump idx] = sort(dist,2); % sort each row
idx = idx(:,1:options.k+1);
dump = dump(:,1:options.k+1);
if ~bBinary
dump = exp(-dump/(2*options.t^2));
end
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),1) = repmat(smpIdx',[options.k+1,1]);
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),2) = idx(:);
if ~bBinary
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),3) = dump(:);
else
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),3) = 1;
end
end
end
W = sparse(G(:,1),G(:,2),G(:,3),nSmp,nSmp);
else
if ~options.bNormalized
[nSmp, nFea] = size(fea);
if issparse(fea)
fea2 = fea';
clear fea;
for i = 1:nSmp
fea2(:,i) = fea2(:,i) ./ max(1e-10,sum(fea2(:,i).^2,1).^.5);
end
fea = fea2';
clear fea2;
else
feaNorm = sum(fea.^2,2).^.5;
for i = 1:nSmp
fea(i,:) = fea(i,:) ./ max(1e-12,feaNorm(i));
end
end
end
G = zeros(nSmp*(options.k+1),3);
for i = 1:ceil(nSmp/BlockSize)
if i == ceil(nSmp/BlockSize)
smpIdx = (i-1)*BlockSize+1:nSmp;
dist = fea(smpIdx,:)*fea';
dist = full(dist);
[dump idx] = sort(-dist,2); % sort each row
idx = idx(:,1:options.k+1);
dump = -dump(:,1:options.k+1);
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),1) = repmat(smpIdx',[options.k+1,1]);
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),2) = idx(:);
G((i-1)*BlockSize*(options.k+1)+1:nSmp*(options.k+1),3) = dump(:);
else
smpIdx = (i-1)*BlockSize+1:i*BlockSize;
dist = fea(smpIdx,:)*fea';
dist = full(dist);
[dump idx] = sort(-dist,2); % sort each row
idx = idx(:,1:options.k+1);
dump = -dump(:,1:options.k+1);
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),1) = repmat(smpIdx',[options.k+1,1]);
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),2) = idx(:);
G((i-1)*BlockSize*(options.k+1)+1:i*BlockSize*(options.k+1),3) = dump(:);
end
end
W = sparse(G(:,1),G(:,2),G(:,3),nSmp,nSmp);
end
if strcmpi(options.WeightMode,'Binary')
W(find(W)) = 1;
end
if isfield(options,'bSemiSupervised') && options.bSemiSupervised
tmpgnd = options.gnd(options.semiSplit);
Label = unique(tmpgnd);
nLabel = length(Label);
G = zeros(sum(options.semiSplit),sum(options.semiSplit));
for idx=1:nLabel
classIdx = tmpgnd==Label(idx);
G(classIdx,classIdx) = 1;
end
Wsup = sparse(G);
if ~isfield(options,'SameCategoryWeight')
options.SameCategoryWeight = 1;
end
W(options.semiSplit,options.semiSplit) = (Wsup>0)*options.SameCategoryWeight;
end
if ~options.bSelfConnected
for i=1:size(W,1)
W(i,i) = 0;
end
end
W = max(W,W');
elapse = cputime - tmp_T;
return;
end
% strcmpi(options.NeighborMode,'KNN') & (options.k == 0)
% Complete Graph
if strcmpi(options.Metric,'Euclidean')
W = EuDist2(fea,[],0);
W = exp(-W/(2*options.t^2));
else
if ~options.bNormalized
% feaNorm = sum(fea.^2,2).^.5;
% fea = fea ./ repmat(max(1e-10,feaNorm),1,size(fea,2));
[nSmp, nFea] = size(fea);
if issparse(fea)
fea2 = fea';
feaNorm = sum(fea2.^2,1).^.5;
for i = 1:nSmp
fea2(:,i) = fea2(:,i) ./ max(1e-10,feaNorm(i));
end
fea = fea2';
clear fea2;
else
feaNorm = sum(fea.^2,2).^.5;
for i = 1:nSmp
fea(i,:) = fea(i,:) ./ max(1e-12,feaNorm(i));
end
end
end
% W = full(fea*fea');
W = fea*fea';
end
if ~options.bSelfConnected
for i=1:size(W,1)
W(i,i) = 0;
end
end
W = max(W,W');
elapse = cputime - tmp_T;
function D = EuDist2(fea_a,fea_b,bSqrt)
% Euclidean Distance matrix
% D = EuDist(fea_a,fea_b)
% fea_a: nSample_a * nFeature
% fea_b: nSample_b * nFeature
% D: nSample_a * nSample_a
% or nSample_a * nSample_b
if ~exist('bSqrt','var')
bSqrt = 1;
end
if (~exist('fea_b','var')) | isempty(fea_b)
[nSmp, nFea] = size(fea_a);
aa = sum(fea_a.*fea_a,2);
ab = fea_a*fea_a';
aa = full(aa);
ab = full(ab);
if bSqrt
D = sqrt(repmat(aa, 1, nSmp) + repmat(aa', nSmp, 1) - 2*ab);
D = real(D);
else
D = repmat(aa, 1, nSmp) + repmat(aa', nSmp, 1) - 2*ab;
end
D = max(D,D');
D = D - diag(diag(D));
D = abs(D);
else
[nSmp_a, nFea] = size(fea_a);
[nSmp_b, nFea] = size(fea_b);
aa = sum(fea_a.*fea_a,2);
bb = sum(fea_b.*fea_b,2);
ab = fea_a*fea_b';
aa = full(aa);
bb = full(bb);
ab = full(ab);
if bSqrt
D = sqrt(repmat(aa, 1, nSmp_b) + repmat(bb', nSmp_a, 1) - 2*ab);
D = real(D);
else
D = repmat(aa, 1, nSmp_b) + repmat(bb', nSmp_a, 1) - 2*ab;
end
D = abs(D);
end
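% --- Usage sketch (not part of the original file; illustrative only) ---
% Build a cosine kNN graph and the normalized Laplacian L = I - D^(-1/2) W D^(-1/2),
% mirroring how construct_lapgraph in MyARTL.m consumes this function:
%   opts.Metric = 'Cosine'; opts.NeighborMode = 'KNN';
%   opts.WeightMode = 'Cosine'; opts.k = 5;
%   W  = lapgraph(fea, opts);                  % fea: nSmp x nFea
%   Dw = diag(sparse(sqrt(1 ./ sum(W))));
%   L  = speye(size(fea, 1)) - Dw * W * Dw;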
| github | minjiang/transferlearning-master | MyARTL.m | .m | transferlearning-master/code/MyARTL/MyARTL.m | 3,503 | utf_8 | 91802921f23d322f2ffca0e311f9372a |
function [acc,acc_ite,Alpha] = MyARTL(X_src,Y_src,X_tar,Y_tar,options)
% Inputs:
%%% X_src :source feature matrix, ns * m
%%% Y_src :source label vector, ns * 1
%%% X_tar :target feature matrix, nt * m
%%% Y_tar :target label vector, nt * 1
%%% options:option struct
% Outputs:
%%% acc :final accuracy using knn, float
%%% acc_ite:list of all accuracies during iterations
%%% A :final adaptation matrix, (ns + nt) * (ns + nt)
%% Set options
lambda = options.lambda; %% lambda for the regularization
kernel_type = options.kernel_type; %% kernel_type is the kernel name, primal|linear|rbf
T = options.T; %% iteration number
n_neighbor = options.n_neighbor;
sigma = options.sigma;
gamma = options.gamma;
X = [X_src',X_tar'];
Y = [Y_src;Y_tar];
X = X*diag(sparse(1./sqrt(sum(X.^2))));
ns = size(X_src,1);
nt = size(X_tar,1);
nm = ns + nt;
e = [1/ns*ones(ns,1);-1/nt*ones(nt,1)];
C = length(unique(Y_src));
E = diag(sparse([ones(ns,1);zeros(nt,1)]));
YY = [];
for c = reshape(unique(Y),1,length(unique(Y)))
YY = [YY,Y==c];
end
%% Construct graph laplacian
manifold.k = options.n_neighbor;
manifold.Metric = 'Cosine';
manifold.NeighborMode = 'KNN';
manifold.WeightMode = 'Cosine';
[W,Dw,L] = construct_lapgraph(X',manifold);
%%% M0
M = e * e' * C; %multiply C for better normalization
acc_ite = [];
Y_tar_pseudo = [];
% If want to include conditional distribution in iteration 1, then open
% this
% if ~isfield(options,'Yt0')
% % model = train(Y(1:ns),sparse(X(:,1:ns)'),'-s 0 -c 1 -q 1');
% % [Y_tar_pseudo,~] = predict(Y(ns+1:end),sparse(X(:,ns+1:end)'),model);
% knn_model = fitcknn(X_src,Y_src,'NumNeighbors',1);
% Y_tar_pseudo = knn_model.predict(X_tar);
% else
% Y_tar_pseudo = options.Yt0;
% end
%% Iteration
for i = 1 : T
%%% Mc
N = 0;
if ~isempty(Y_tar_pseudo) && length(Y_tar_pseudo)==nt
for c = reshape(unique(Y_src),1,C)
e = zeros(nm,1);
e(Y_src==c) = 1 / length(find(Y_src==c));
e(ns+find(Y_tar_pseudo==c)) = -1 / length(find(Y_tar_pseudo==c));
e(isinf(e)) = 0;
N = N + e*e';
end
end
M = M + N;
M = M / norm(M,'fro');
%% Calculation
K = kernel_artl(kernel_type,X,sqrt(sum(sum(X.^2).^0.5)/nm));
Alpha = ((E + lambda * M + gamma * L) * K + sigma * speye(nm,nm)) \ (E * YY);
F = K * Alpha;
[~,Cls] = max(F,[],2);
Acc = numel(find(Cls(ns+1:end)==Y(ns+1:end)))/nt;
Y_tar_pseudo = Cls(ns+1:end);
fprintf('Iteration [%2d]:ARTL=%0.4f\n',i,Acc);
acc_ite = [acc_ite;Acc];
end
end
function [W,Dw,L] = construct_lapgraph(X,options)
W = lapgraph(X,options);
Dw = diag(sparse(sqrt(1./sum(W))));
L = speye(size(X,1)) - Dw * W * Dw;
end
function K = kernel_artl(ker,X,sigma)
switch ker
case 'linear'
K = X' * X;
case 'rbf'
n1sq = sum(X.^2,1);
n1 = size(X,2);
D = (ones(n1,1)*n1sq)' + ones(n1,1)*n1sq -2*X'*X;
K = exp(-D/(2*sigma^2));
case 'sam'
D = X'*X;
K = exp(-acos(D).^2/(2*sigma^2));
otherwise
error(['Unsupported kernel ' ker])
end
end
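% --- Usage sketch (not part of the original file; the parameter values below
% are illustrative assumptions) ---
% Xs/Ys and Xt/Yt hold source and target features (n x m) and labels (n x 1):
%   options.lambda      = 10;     % weight of the MMD term
%   options.kernel_type = 'rbf';
%   options.T           = 10;     % number of iterations
%   options.n_neighbor  = 5;      % kNN used for the graph Laplacian
%   options.sigma       = 0.1;    % ridge regularization
%   options.gamma       = 1;      % manifold regularization
%   [acc, acc_ite, Alpha] = MyARTL(Xs, Ys, Xt, Yt, options);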
| github | 100957264/WatchLauncher-master | echo_diagnostic.m | .m | WatchLauncher-master/NormalTools/studio/android/app/src/main/jni/libspeex/echo_diagnostic.m | 2,076 | utf_8 | 8d5e7563976fbd9bd2eda26711f7d8dc |
% Attempts to diagnose AEC problems from recorded samples
%
% out = echo_diagnostic(rec_file, play_file, out_file, tail_length)
%
% Computes the full matrix inversion to cancel echo from the
% recording 'rec_file' using the far end signal 'play_file' using
% a filter length of 'tail_length'. The output is saved to 'out_file'.
function out = echo_diagnostic(rec_file, play_file, out_file, tail_length)
F=fopen(rec_file,'rb');
rec=fread(F,Inf,'short');
fclose (F);
F=fopen(play_file,'rb');
play=fread(F,Inf,'short');
fclose (F);
rec = [rec; zeros(1024,1)];
play = [play; zeros(1024,1)];
N = length(rec);
corr = real(ifft(fft(rec).*conj(fft(play))));
acorr = real(ifft(fft(play).*conj(fft(play))));
[a,b] = max(corr);
if b > N/2
b = b-N;
end
printf ("Far end to near end delay is %d samples\n", b);
if (b > .3*tail_length)
printf ('This is too much delay, try delaying the far-end signal a bit\n');
else if (b < 0)
printf ('You have a negative delay, the echo canceller has no chance to cancel anything!\n');
else
printf ('Delay looks OK.\n');
end
end
end
N2 = round(N/2);
corr1 = real(ifft(fft(rec(1:N2)).*conj(fft(play(1:N2)))));
corr2 = real(ifft(fft(rec(N2+1:end)).*conj(fft(play(N2+1:end)))));
[a,b1] = max(corr1);
if b1 > N2/2
b1 = b1-N2;
end
[a,b2] = max(corr2);
if b2 > N2/2
b2 = b2-N2;
end
drift = (b1-b2)/N2;
printf ('Drift estimate is %f%% (%d samples)\n', 100*drift, b1-b2);
if abs(b1-b2) < 10
printf ('A drift of a few (+-10) samples is normal.\n');
elseif abs(b1-b2) < 30
printf ('There may be (not sure) excessive clock drift. Is the capture and playback done on the same soundcard?\n');
else
printf ('Your clock is drifting! No way the AEC will be able to do anything with that. Most likely, you''re doing capture and playback from two different cards.\n');
end
acorr(1) = .001+1.00001*acorr(1);
AtA = toeplitz(acorr(1:tail_length));
bb = corr(1:tail_length);
h = AtA\bb;
out = (rec - filter(h, 1, play));
F=fopen(out_file,'w');
fwrite(F,out,'short');
fclose (F);
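% --- Usage sketch (not part of the original file; the file names and tail
% length below are hypothetical) ---
% rec.sw and play.sw are assumed to be raw 16-bit mono captures of the
% near-end and far-end signals:
%   out = echo_diagnostic('rec.sw', 'play.sw', 'out.sw', 1024);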
| github | jkjung-avt/py-faster-rcnn-master | voc_eval.m | .m | py-faster-rcnn-master/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m | 1,332 | utf_8 | 3ee1d5373b091ae4ab79d26ab657c962 |
function res = voc_eval(path, comp_id, test_set, output_dir)
VOCopts = get_voc_opts(path);
VOCopts.testset = test_set;
for i = 1:length(VOCopts.classes)
cls = VOCopts.classes{i};
res(i) = voc_eval_cls(cls, VOCopts, comp_id, output_dir);
end
fprintf('\n~~~~~~~~~~~~~~~~~~~~\n');
fprintf('Results:\n');
aps = [res(:).ap]';
fprintf('%.1f\n', aps * 100);
fprintf('%.1f\n', mean(aps) * 100);
fprintf('~~~~~~~~~~~~~~~~~~~~\n');
function res = voc_eval_cls(cls, VOCopts, comp_id, output_dir)
test_set = VOCopts.testset;
year = VOCopts.dataset(4:end);
addpath(fullfile(VOCopts.datadir, 'VOCcode'));
res_fn = sprintf(VOCopts.detrespath, comp_id, cls);
recall = [];
prec = [];
ap = 0;
ap_auc = 0;
do_eval = (str2num(year) <= 2007) | ~strcmp(test_set, 'test');
if do_eval
% Bug in VOCevaldet requires that tic has been called first
tic;
[recall, prec, ap] = VOCevaldet(VOCopts, comp_id, cls, true);
ap_auc = xVOCap(recall, prec);
% force plot limits
ylim([0 1]);
xlim([0 1]);
print(gcf, '-djpeg', '-r0', ...
[output_dir '/' cls '_pr.jpg']);
end
fprintf('!!! %s : %.4f %.4f\n', cls, ap, ap_auc);
res.recall = recall;
res.prec = prec;
res.ap = ap;
res.ap_auc = ap_auc;
save([output_dir '/' cls '_pr.mat'], ...
'res', 'recall', 'prec', 'ap', 'ap_auc');
rmpath(fullfile(VOCopts.datadir, 'VOCcode'));
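% --- Usage sketch (not part of the original file; the devkit path, comp_id,
% and output directory below are hypothetical) ---
%   res = voc_eval('/path/to/VOCdevkit', 'comp4-1234', 'test', '/tmp/voc_out');
%   fprintf('mAP: %.1f\n', 100 * mean([res(:).ap]));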
| github | vkalogeiton/caffe-master | classification_demo.m | .m | caffe-master/matlab/demo/classification_demo.m | 5,466 | utf_8 | 45745fb7cfe37ef723c307dfa06f1b97 |
function [scores, maxlabel] = classification_demo(im, use_gpu)
% [scores, maxlabel] = classification_demo(im, use_gpu)
%
% Image classification demo using BVLC CaffeNet.
%
% IMPORTANT: before you run this demo, you should download BVLC CaffeNet
% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)
%
% ****************************************************************************
% For detailed documentation and usage on Caffe's Matlab interface, please
% refer to the Caffe Interface Tutorial at
% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab
% ****************************************************************************
%
% input
% im color image as uint8 HxWx3
% use_gpu 1 to use the GPU, 0 to use the CPU
%
% output
% scores 1000-dimensional ILSVRC score vector
% maxlabel the label of the highest score
%
% You may need to do the following before you start matlab:
% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64
% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
% Or the equivalent based on where things are installed on your system
% and what versions are installed.
%
% Usage:
% im = imread('../../examples/images/cat.jpg');
% scores = classification_demo(im, 1);
% [score, class] = max(scores);
% Five things to be aware of:
% caffe uses row-major order
% matlab uses column-major order
% caffe uses BGR color channel order
% matlab uses RGB color channel order
% images need to have the data mean subtracted
% Data coming in from matlab needs to be in the order
% [width, height, channels, images]
% where width is the fastest dimension.
% Here is the rough matlab code for putting image data into the correct
% format in W x H x C with BGR channels:
% % permute channels from RGB to BGR
% im_data = im(:, :, [3, 2, 1]);
% % flip width and height to make width the fastest dimension
% im_data = permute(im_data, [2, 1, 3]);
% % convert from uint8 to single
% im_data = single(im_data);
% % reshape to a fixed size (e.g., 227x227).
% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');
% % subtract mean_data (already in W x H x C with BGR channels)
% im_data = im_data - mean_data;
% If you have multiple images, cat them with cat(4, ...)
% Add caffe/matlab to your Matlab search PATH in order to use matcaffe
if exist('../+caffe', 'dir')
addpath('..');
else
error('Please run this demo from caffe/matlab/demo');
end
% Set caffe mode
if exist('use_gpu', 'var') && use_gpu
caffe.set_mode_gpu();
gpu_id = 0; % we will use the first gpu in this demo
caffe.set_device(gpu_id);
else
caffe.set_mode_cpu();
end
% Initialize the network using BVLC CaffeNet for image classification
% Weights (parameter) file needs to be downloaded from Model Zoo.
model_dir = '../../models/bvlc_reference_caffenet/';
net_model = [model_dir 'deploy.prototxt'];
net_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];
phase = 'test'; % run with phase test (so that dropout isn't applied)
if ~exist(net_weights, 'file')
error('Please download CaffeNet from Model Zoo before you run this demo');
end
% Initialize a network
net = caffe.Net(net_model, net_weights, phase);
if nargin < 1
% For demo purposes we will use the cat image
fprintf('using caffe/examples/images/cat.jpg as input image\n');
im = imread('../../examples/images/cat.jpg');
end
% prepare oversampled input
% input_data is Height x Width x Channel x Num
tic;
input_data = {prepare_image(im)};
toc;
% do forward pass to get scores
% scores are now Channels x Num, where Channels == 1000
tic;
% The net forward function. It takes in a cell array of N-D arrays
% (where N == 4 here) containing data of input blob(s) and outputs a cell
% array containing data from output blob(s)
scores = net.forward(input_data);
toc;
scores = scores{1};
scores = mean(scores, 2); % take average scores over 10 crops
[~, maxlabel] = max(scores);
% call caffe.reset_all() to reset caffe
caffe.reset_all();
% ------------------------------------------------------------------------
function crops_data = prepare_image(im)
% ------------------------------------------------------------------------
% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that
% is already in W x H x C with BGR channels
d = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');
mean_data = d.mean_data;
IMAGE_DIM = 256;
CROPPED_DIM = 227;
% Convert an image returned by Matlab's imread to im_data in caffe's data
% format: W x H x C with BGR channels
im_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR
im_data = permute(im_data, [2, 1, 3]); % flip width and height
im_data = single(im_data); % convert from uint8 to single
im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data
im_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)
% oversample (4 corners, center, and their x-axis flips)
crops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');
indices = [0 IMAGE_DIM-CROPPED_DIM] + 1;
n = 1;
for i = indices
for j = indices
crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);
crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);
n = n + 1;
end
end
center = floor(indices(2) / 2) + 1;
crops_data(:,:,:,5) = ...
im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);
crops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);
| github | lederman/Prol-master | gpsf_report1_figures.m | .m | Prol-master/doc/figures/gpsf_report1_figures.m | 3,770 | utf_8 | e0676d6ce41e08e23c42591f0ce93954 |
%
% prol
% Demonstration code for computing generalized prolate spheroidal functions.
% (Matlab(R) version)
%
% Author: Roy R. Lederman
% http://roy.lederman.name/
% http://github.com/lederman/prol
%
% This code generates the figures for the paper gpsf_report1.tex
%
function gpsf_report1_figures()
% run matlab_addpath_prol_src() in /src/matlab before running this code.
file_header = 'gpsf_report1_'
report_part001(file_header)
end
function report_part001(file_header)
%
% sample eigenvalues figures
%
c=pi*20;
D=3;
h1=figure; % eigenvalues magnitude |\nu|
h2=figure; % eigenvalues magnitude is close to one: |1-|\nu||
matdim = 800;
minEigenvalRatio = 10^-40;
prolate_crea_options.isfixfirst = 1;
Ns = [0:5:20];
for j1=1:length(Ns)
N=Ns(j1);
tic
[prolate_dat, iserr , ~] = prolate_crea(c,D,N,minEigenvalRatio, matdim, prolate_crea_options);
toc
figure(h1)
semilogy([0:prolate_dat.num_prols-1],(abs(prolate_dat.nu)),'LineWidth',3)
hold on
figure(h2)
semilogy([0:prolate_dat.num_prols-1],max(abs(1-abs(prolate_dat.nu)),10^-20),'LineWidth',3)
% the max is taken to avoid log(0) when |\nu| = 1 exactly.
hold on
end
figure(h1)
ylim([10^-30,3])
xlabel('n')
lgd=legend(num2str(Ns'));
%title(lgd,'N=')
set(gca,'FontSize', 12);
ylabel('|\nu_n|','FontSize', 14)
print([file_header,'D3_eigenvals.png'],'-dpng')
figure(h2)
ylim([10^-16,3])
xlabel('n')
lgd=legend(num2str(Ns'));
%title(lgd,'N=')
set(gca,'FontSize', 12);
ylabel('|1-|\nu_n||','FontSize', 14)
print([file_header,'D3_eigenvals_to_one.png'],'-dpng')
%
% Without fixing the coefficients of the first eigenvector
%
h1=figure;
h2=figure;
prolate_crea_options.isfixfirst = 0;
Ns = [0:3:20];
for j1=1:length(Ns)
N=Ns(j1);
tic
[prolate_dat, iserr , ~] = prolate_crea(c,D,N,minEigenvalRatio, matdim, prolate_crea_options);
toc
figure(h1)
semilogy([0:prolate_dat.num_prols-1],(abs(prolate_dat.nu)),'LineWidth',3)
hold on
figure(h2)
semilogy([0:prolate_dat.num_prols-1],abs(1-abs(prolate_dat.nu)),'LineWidth',3)
hold on
end
figure(h1)
ylim([10^-30,3])
xlabel('n')
lgd=legend(num2str(Ns'));
%title(lgd,'N=')
set(gca,'FontSize', 12);
ylabel('\nu_n','FontSize', 14)
%
% sample eigenfunctions figures
%
c= 20 * pi;
D=3;
N=0;
matdim = 800;
xx = linspace(0,1,1000);
minEigenvalRatio = 10^-30;
prolate_crea_options.isfixfirst = 1;
tic
[prolate_dat, iserr , ~] = prolate_crea(c,D,N,minEigenvalRatio, matdim, prolate_crea_options);
toc
tic
[v] = prolate_ev(prolate_dat, [0:prolate_dat.num_prols-1], xx);
toc
h1 = figure;
funcid = [0:1,2,5,10];
plot(xx,v(:,funcid+1),'LineWidth',2);
xlabel('x')
lgd=legend(num2str(funcid'));
set(gca,'FontSize', 12);
ylabel('\Phi_{N,n}(x)','FontSize', 14)
print([file_header,'D3_N0_eigenfuncs.png'],'-dpng')
%
%
%
N=1;
tic
[prolate_dat, iserr , ~] = prolate_crea(c,D,N,minEigenvalRatio, matdim, prolate_crea_options);
toc
tic
[v] = prolate_ev(prolate_dat, [0:prolate_dat.num_prols-1], xx);
toc
h1 = figure;
funcid = [0:1,2,5,10];
plot(xx,v(:,funcid+1),'LineWidth',2);
xlabel('x')
lgd=legend(num2str(funcid'));
set(gca,'FontSize', 12);
ylabel('\Phi_{N,n}(x)','FontSize', 14)
print([file_header,'D3_N1_eigenfuncs.png'],'-dpng')
end
| github | lederman/Prol-master | prolate_ev.m | .m | Prol-master/src/matlab/prolate_ev.m | 647 | utf_8 | ea9b52e826d67d93d39a8d5ebe4dee78 |
function [v] = prolate_ev(prolate_dat, prolate_ids, xx)
%
% Evaluates the prolate functions.
%
% Input:
% * prolate_dat : precomputed data structure (prolate_crea).
% * prolate_ids : which prolates to compute. vector of ids between 0 and prolate_dat.num_prols-1.
% * xx : vector of points in the interval [0,1] where the prolates should be evaluated
% Output:
% * v : matrix of evaluated prolates.
% each column refers to a different prolate, each row to a different coordinate.
%
%
assert( prolate_dat.type == 2 )
v = prolate_ZernikeNorm_ex(prolate_dat.p,prolate_dat.N, prolate_dat.cfs(:,prolate_ids+1), xx) ;
end
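% --- Usage sketch (not part of the original file; the parameter values follow
% the demo in gpsf_report1_figures.m) ---
%   opts.isfixfirst = 1;
%   prolate_dat = prolate_crea(20*pi, 3, 0, 1e-30, 800, opts);
%   xx = linspace(0, 1, 1000);
%   v  = prolate_ev(prolate_dat, 0:4, xx);   % first five prolates evaluated at xx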
| github | lederman/Prol-master | prolate_crea.m | .m | Prol-master/src/matlab/prolate_crea.m | 6,511 | utf_8 | 5a20b52f509115bbf50f57247e82a50e |
function [prolate_dat, iserr , prolate_dat_tmp] = prolate_crea(c, D, N, minEigenvalRatio, matdim , prolate_crea_options)
%
% prolate_crea creates a data-structure for computing a family of
% generalized prolate spheroidal functions for dimension D and order N.
%
% Input:
% * c : prolate truncation frequency
% * D : prolate dimension (D=p+2)
% * N : prolate order
% * minEigenvalRatio : keep only the prolates with eigenvalue \gamma s.t.
% | \gamma_n | > c^{-1/2} * minEigenvalRatio .
% The reason is that for small n and large c, c^{1/2}| \gamma_n | -> 1
% * matdim : the dimensionality of the matrix used in precomputation.
% This is a technical parameter which will be removed in future versions.
% If this number is too small, a warning will be generated.
% If this number is too large, the precomputation can be slow.
% * prolate_crea_options : optional parameters
% isfast : run faster computation without fixing the eigenvectors?
% This will be removed in future versions.
%
% Output:
% * prolate_dat : data structure to be used in prolate_ev
% * iserr : error code
% 0 : no error
% 1 : empty set of prolates.
% 10 : matdim may be too small (based on number of prolates kept)
% 100 : matdim may be too small (based on number of coefficients kept)
%
%
%
% TODO:
% * Remove Matlab eig for more accurate computation of eigenvectors'
% elements without the second pass on the eigenvectors.
% * Introduce wrapper to compute matdim
% * Introduce wrapper to compute for all N.
% * Add user control of accuracy.
%
assert(round(D)==D) % Integer dimension
assert(D>1) % One dimensional case to be treated separately
assert(c>0) % Physical c
assert(N>=0) % Physical N
assert(round(N)==N)
assert(minEigenvalRatio<1) % otherwise, what is the point?
assert(minEigenvalRatio > 0) % Eigenvalues must be truncated
assert(matdim > 10) % The matrix for computing the coefficients cannot be too small
isfast = 1; % don't bypass the eigenvector correction
isfixfirst = 1; % don't bypass the eigenvector correction
if exist('prolate_crea_options')
if isfield(prolate_crea_options,'isfast')
isfast = prolate_crea_options.isfast;
end
if isfield(prolate_crea_options,'isfixfirst')
isfixfirst = prolate_crea_options.isfixfirst;
end
end
%
% Parameters
%
iserr = 0;
prolate_dat.type = 2;
prolate_dat.c = c;
prolate_dat.D = D;
prolate_dat.p = D-2;
prolate_dat.N = N;
prolate_dat.creaparam.minEigenvalRatio = minEigenvalRatio;
prolate_dat.creaparam.matdim = matdim;
prolate_dat.evparam.cfs_eps = eps(1.0)/100;
%
% differential equation eigenproblem in matrix form; the eigenvectors
% are the coefficients of the prolates.
%
% TODO: replace the full matrix operation.
[mat, vdiag, voffdiag] = prolate_diffop_mat_full(prolate_dat.c, prolate_dat.p ,prolate_dat.N , prolate_dat.creaparam.matdim-1);
[u,d] = eig(mat);
% Note that Matlab eig truncates some small coefficients by setting them to 0.
[eigvals,eigvals_order] = sort(diag(d),'descend');
eigvecs = u(:,eigvals_order);
eigvecs = bsxfun(@times, eigvecs, sign(eigvecs(1,:)).*(-1).^[0:matdim-1] ); % standard sign. Assumes accurate first element.
%
% temporary data structure that stores data before truncation
%
prolate_dat_tmp = prolate_dat;
prolate_dat_tmp.cfs = eigvecs;
prolate_dat_tmp.diffeigs = eigvals;
% fix the first eigenvector to reduce the scope of numerical inaccuracy
% due to eigenvector truncation in Matlab's eig.
if (isfixfirst==1)
prolate_dat_tmp.cfs(:,1) = prolate_crea_fix_eigenvec(mat, prolate_dat_tmp , 1);
end
% compute the first eigenvalue
%gam0num = prolate_numericalgam(prolate_dat_tmp, 0);% TODO: replace with approximate maximum using WKB and/or Newton method search.
gam0 = prolate_analyticgam(prolate_dat_tmp, 0);
% Compute the rest of the eigenvalues through recursion
[ratios , ~] = prolate_crea_eigRatios(prolate_dat_tmp);
prolate_dat_tmp.gams = gam0 * ratios;
prolate_dat_tmp.nu_abs = abs(gam0 * ratios * prolate_dat.c^(1/2));
% Find where to truncate
ids_prolate_to_discard = find( prolate_dat_tmp.nu_abs <= prolate_dat.creaparam.minEigenvalRatio );
ids_prolate_to_keep = [1:min(ids_prolate_to_discard)-1+1];
if (isempty(ids_prolate_to_keep))
warning('No prolates to keep');
iserr = 1;
return
end
% Note that Matlab eig truncates the small coefficients by setting them to 0.
abs_cfs = max( abs(prolate_dat_tmp.cfs (:,[1:ids_prolate_to_keep(end)])), [], 2) ;
cfs_to_keep = find( abs_cfs >= prolate_dat.evparam.cfs_eps );
cfs_to_keep = [1:max(cfs_to_keep)];
% truncated coefficients
prolate_dat.cfs = prolate_dat_tmp.cfs( 1:cfs_to_keep(end) , 1:ids_prolate_to_keep(end) ) ;
% the various forms of the integral operator eigenvalues
prolate_dat.gam = gam0 * ratios(1:ids_prolate_to_keep(end) );
prolate_dat.bet = prolate_dat.gam/(prolate_dat.c^((prolate_dat.p+1)/2));
prolate_dat.alp = prolate_dat.bet * (1i)^prolate_dat.N * (2*pi)^(1+prolate_dat.p/2);
prolate_dat.nu = (1i)^prolate_dat.N * prolate_dat.c^(1/2) * prolate_dat.gam;
% the differential operator eigenvalues
prolate_dat.chi = eigvals(1:ids_prolate_to_keep(end));
prolate_dat.num_prols = ids_prolate_to_keep(end);
%
% Warnings
% Take plenty of margin for the truncation.
%
if (ids_prolate_to_keep(end)+20 >= matdim)
warning('prolate_crea: insufficient margin in matrix size (number of prolates)')
iserr = iserr+10;
end
if (cfs_to_keep(end)+40 >= matdim)
warning('prolate_crea: insufficient margin in matrix size (number of coefficients)')
iserr = iserr+100;
end
end
function v = prolate_crea_fix_eigenvec(mat, prolate_dat , jj)
tmpmat = mat-eye(prolate_dat.creaparam.matdim)*...
(prolate_dat.diffeigs(jj)+(prolate_dat.diffeigs(jj+1)-prolate_dat.diffeigs(jj))/10^5/(jj+1) );
v=tmpmat\prolate_dat.cfs(:,jj);
v = v/norm(v);
end
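% --- Usage sketch (not part of the original file; values are illustrative) ---
% Create the data structure and check the error code before evaluating:
%   opts.isfixfirst = 1;
%   [prolate_dat, iserr] = prolate_crea(20*pi, 3, 0, 1e-40, 800, opts);
%   if iserr ~= 0
%       warning('prolate_crea returned error code %d', iserr);
%   end
%   fprintf('%d prolates kept\n', prolate_dat.num_prols);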
| github | lederman/Prol-master | matlab_addpath_prol_src.m | .m | Prol-master/src/matlab/matlab_addpath_prol_src.m | 224 | utf_8 | f61dd8a0f775bcd01a4600503a733dc1 |
%
% Add to path
%
function matlab_addpath_prol_src()
path_to_pkg = fileparts(mfilename('fullpath'));
addp = @(d)(addpath(fullfile(path_to_pkg, d)));
addp('');
addp('polynomials');
addp('service');
end
| github | lederman/Prol-master | prolate_analyticgam.m | .m | Prol-master/src/matlab/service/prolate_analyticgam.m | 1,598 | utf_8 | a8cef2c41c56b40443b2f172ac32a1a7 |
function gam = prolate_analyticgam(prolate_dat, n)
%
% Computation of the n-th eigenvalue of the integral operator.
% Uses the data structure prolate_dat created by prolate_crea.
%
% Generally speaking, this function should only be used for computing the
% eigenvalue for n=0 by prolate_crea.
%
% Input:
% * prolate_dat : precomputed prolate information.
% * n : the id of the eigenvalue to be computed.
% This would usually be n=0.
% Output:
% * gam : the eigenvalue \gamma_n
%
% Todo: remove dependency on undocumented properties of the eigenvector
% computation in matlab.
%
% Note: this function can be more sensitive to the truncation of the list
% of coefficients.
%
% coefficients of the chosen prolate
cfs = prolate_dat.cfs(:,n+1);
% extract parameters
N=prolate_dat.N;
p=prolate_dat.p;
c=prolate_dat.c;
k=[0:length(cfs)-1]';
% parts of the computation
cfs1 = (-1).^k .* sqrt(2+4*k+2*N+p) .* (2+2*N+p) /2;
cfs2n = (N+p/2 + k);
cfs2n(1) = gamma(1+ N+p/2 );
cfs2d = k;
cfs2d(1) = 1;
cfs2 = cumprod(cfs2n./cfs2d);
%
% Safety truncation to avoid inf.
%
mytrunc1 = find(abs(cfs2)>realmax*10^-10);
mytrunc2 = find(abs(cfs)<realmin*10^10);
mytrunc = min([mytrunc1,mytrunc2]);
if ~isempty(mytrunc)
cfs = cfs(1:mytrunc);
cfs1 = cfs1(1:mytrunc);
cfs2 = cfs2(1:mytrunc);
end
%
% Compute \gamma
%
num = 2^(-(N+p/2+1)) * c^(N+p/2+0.5) * sqrt(2+2*N+p) * cfs(1);
denom = sum( cfs.*cfs1.*cfs2 );
gam = num/denom;
end
| github | lederman/Prol-master | prolate_numericalgam.m | .m | Prol-master/src/matlab/service/prolate_numericalgam.m | 1,757 | utf_8 | 96fdcfafa9bf6765fd02db80606f0be4 |
function gam = prolate_numericalgam(prolate_dat, n)
%
% Numerical computation of the n-th eigenvalue of the integral operator.
% Uses the data structure prolate_dat created by prolate_crea.
%
% Generally speaking, this function should only be used for computing the
% eigenvalue for n=0 by prolate_crea, and should not be used otherwise.
%
% Input:
% * prolate_dat : precomputed prolate information.
% * n : the id of the eigenvalue to be computed.
% This would usually be n=0.
% Output:
% * gam : the eigenvalue \gamma_n
%
% Todo: remove matlab dependency
assert( prolate_dat.type == 2 )
% find a large enough point
% TODO: replace with approximate maximum using WKB and/or Newton method search.
xx0 = linspace(0,1,1000)';
% prolate
[v] = prolate_ev(prolate_dat, [n], xx0);
% weighted prolate: \phi_n(x) = x^{(p+1)/2} \Phi_n (x);
v=bsxfun(@times, xx0.^((prolate_dat.p+1)/2) , v);
% find max
[xmax_v,xmax_id] = max(abs(v));
xmax_v = v(xmax_id);
xmax = xx0(xmax_id);
% truncate the vector of coefficients
vec = prolate_dat.cfs(:,n+1);
tmpkeep = find(abs(vec) >= prolate_dat.evparam.cfs_eps);
idskeep=tmpkeep(end);
vec((idskeep+1):end) = [];
%
% numerical integration:
%
% function to integrate
fun = @(y) reshape( besselj(prolate_dat.N+prolate_dat.p/2, prolate_dat.c*xmax*y(:)).*sqrt(prolate_dat.c *xmax *y(:)) .*y(:).^((prolate_dat.p+1)/2) .* prolate_ZernikeNorm_ex(prolate_dat.p,prolate_dat.N, vec, y(:)) , size(y) );
% integration:
%q1 = integral( fun,0,1 ); % matlab only
q1 = quad( fun,0,1, eps(xmax_v)*2 ); % compatible with Octave
%
% The eigenvalue is the ratio:
%
gam = q1 / xmax_v;
end
| github | lederman/Prol-master | prolate_diffop_mat_tridiag.m | .m | Prol-master/src/matlab/service/prolate_diffop_mat_tridiag.m | 1,607 | utf_8 | f7308e7a142a5c14badff37d8e20c56c |
function [vdiag, voffdiag] = prolate_diffop_mat_tridiag(c,p,N,maxk)
%
% Computes the matrix representation of the differential operator,
% in the basis of Zernike polynomials.
%
% Input:
% * c,p,N : prolate parameters.
% * maxk : matrix truncation: the dimensionality of the matrix is maxk+1
% Output:
% * vdiag : vector of the elements of the matrix diagonal
% * voffdiag : vector of coefficients of the matrix off-diagonal
%
% Note that the matrix is symmetric tridiagonal. All other elements are
% zeros.
%
vdiag = zeros(maxk+1,1);
voffdiag = zeros(maxk,1);
for k=0:maxk
vdiag(k+1,1) = prolate_diffop_mat_diag_element(c,p,N,k);
end
for k=0:maxk-1
voffdiag(k+1,1) = prolate_diffop_mat_offdiag_element(c,p,N,k);
end
end
function v = prolate_diffop_mat_offdiag_element(c,p,N,k)
%
% helper function for prolate_diffop_mat_tridiag.
% offdiagonal elements.
%
nn = k+1;
NN = N+p/2;
if nn<=0
v=0;
else
v = -(c^2*nn)/((2*nn+NN)*(2*nn+NN+1)) * (nn+NN)/(sqrt(1-2/(1+2*nn+NN)));
end
end
function v = prolate_diffop_mat_diag_element(c,p,N,k)
%
% helper function for prolate_diffop_mat_tridiag.
% elements on the diagonal.
%
NN=N+p/2;
if (NN==0)&&(k==0)
v = -( prolate_diffop_mat_kappa(p,N,k) +c^2/2 );
else
v = -( prolate_diffop_mat_kappa(p,N,k) +c^2*(2*k*(k+1)+NN*(2*k+NN+1))/((2*k+NN)*(2*k+NN+2)) );
end
end
function kap = prolate_diffop_mat_kappa(p,N,k)
%
% helper function for prolate_diffop_mat_tridiag.
%
NN = N+p/2;
kap = (NN+2*k+1/2)*(NN+2*k+3/2);
end
| github | lederman/Prol-master | prolate_diffop_mat_full.m | .m | Prol-master/src/matlab/service/prolate_diffop_mat_full.m | 834 | utf_8 | ca3ead2addc46e97c38d150ba121df6b |
function [mat, vdiag, voffdiag ] = prolate_diffop_mat_full(c,p,N,maxk)
%
% Computes the full matrix representation of the differential operator,
% in the basis of Zernike polynomials.
%
% Input:
% * c,p,N : prolate parameters.
% * maxk : matrix truncation: the dimensionality of the matrix is maxk+1
% Output:
% * mat : the matrix
% * vdiag : vector of the elements of the matrix diagonal
% * voffdiag : vector of coefficients of the matrix off-diagonal
%
% Note that the matrix is symmetric tridiagonal. All other elements are
% zeros.
%
mat = zeros(maxk+1);
[vdiag, voffdiag] = prolate_diffop_mat_tridiag(c,p,N,maxk);
for k=0:maxk
mat(k+1,k+1) = vdiag(k+1);
end
for k=0:maxk-1
mat(k+1,k+2) = voffdiag(k+1);
mat(k+2,k+1) = voffdiag(k+1);
end
end
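% --- Sketch (not part of the original file): since the operator is symmetric
% tridiagonal, the same matrix can be assembled sparsely from the two vectors
% returned by prolate_diffop_mat_tridiag, avoiding the dense fill above:
%   [vdiag, voffdiag] = prolate_diffop_mat_tridiag(c, p, N, maxk);
%   mat = spdiags([[voffdiag; 0], vdiag, [0; voffdiag]], -1:1, maxk+1, maxk+1);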
| github | lederman/Prol-master | prolate_ZernikeNorm_ex.m | .m | Prol-master/src/matlab/polynomials/prolate_ZernikeNorm_ex.m | 796 | utf_8 | 32da78a7d24f02fd89e8f9c58e238999 |
function v = prolate_ZernikeNorm_ex(p,N,cfsvec,xx)
%
%
% Evaluates functions expanded in the basis of normalized Zernike
% polynomials.
%
% v(i,j) = \sum_{q=0}^{k-1} cfsvec(q,j) \hat{R}_{N,n,p}_q(x_i)
%
%
% Input:
% * p,N : the p,N parameters of the Zernike polynomials to use here.
% * cfsvec : k x m matrix.
% Columns of coefficients, each column has the k coefficients of an expansion
% in Zernike polynomials of order k-1 for one of the m different functions.
% * xx : a vector of length l.
% each entry is a value of x where each one of the k expansions should be evaluated.
% Output:
% * v : l x m matrix.
% The j-th column is the j-th function evaluated at the l points.
%
%
v = prolate_ZernikeNorm_ex_fromJacobi(p,N,cfsvec,xx) ;
end
| github | lederman/Prol-master | prolate_xdZernikeNorm_coef.m | .m | Prol-master/src/matlab/polynomials/prolate_xdZernikeNorm_coef.m | 1,054 | utf_8 | e1d65d8fdd68d9868f8e127161c4ed41 |
function dvec = prolate_xdZernikeNorm_coef(p,N,vec)
%
% Computes the expansion of xf'(x) in the basis of Zernike polynomials,
% where f(x) is given in the basis of Zernike polynomials.
%
% Input:
% * p,N : Prolate/Zernike parameters.
% * vec : vector (or multiple vectors in multiple columns) of the
% coefficients of functions, expanded in the basis of Zernike polynomials.
%
% Output:
% * dvec : vector (or multiple vectors in multiple columns) of the
% coefficients of the expansion of xf'(x).
%
dvec = 0 * vec;
tmpvec = vec;
tmpjacvec = 0*vec;
for n=size(vec,1)-1:-1:0
dvec(n+1,:) = dvec(n+1,:) + sqrt(2*n + N + p/2 + 1)/sqrt(2)/(n + N + p/2 + 1) * tmpjacvec(n+1,:);
dvec(n+1,:) = dvec(n+1,:) + (2*n+N) * (tmpvec(n+1,:));
if (n==0)
break
end
tmpjacvec(n,:)= (n + N + p/2)/(n + N + p/2 + 1) * tmpjacvec(n+1,:);
tmpjacvec(n,:) = tmpjacvec(n,:) + (2*(n + N) + p)* sqrt(2*(2*n + N + p/2 + 1)) * tmpvec(n+1,:); % Jacobi component
end
end
| github | lederman/Prol-master | prolate_ZernikeNorm_ex_fromJacobi.m | .m | Prol-master/src/matlab/polynomials/prolate_ZernikeNorm_ex_fromJacobi.m | 1,221 | utf_8 | 4cfe0e16b432763a99f8566676137ce5 |
function v = prolate_ZernikeNorm_ex_fromJacobi(p,N,cfsvec,xx)
%
% Evaluates functions expanded in the basis of normalized Zernike polynomials
% using Jacobi polynomials.
%
% v(i,j) = \sum_{q=0}^{k-1} cfsvec(q,j) \hat{R}_{N,n,p}_q(x_i)
%
% Using Jacobi polynomials:
% \hat{R}_{N,n,p}_q(x_i) = (-1)^n \sqrt{2(2n+N+p/2+1)} x^N P^{(N+p/2,0)}_n(1-2x^2)
%
% Input:
% * p,N : the p,N parameters of the Zernike polynomials to use here.
% * cfsvec : k x m matrix.
% Columns of coefficients, each column has the k coefficients of an expansion
% in Zernike polynomials of order k-1 for one of the m different functions.
% * xx : a vector of length l.
% each entry is a value of x where each one of the k expansions should be evaluated.
% Output:
% * v : l x m matrix.
% The j-th column is the j-th function evaluated at the l points.
%
%
b=0;
a=N+p/2;
yy = 1-2 * xx(:).^2;
K = size(cfsvec,1)-1;
cfsvec_jac = cfsvec;
cfsvec_jac = bsxfun(@times, (-1).^[0:K]', cfsvec_jac);
cfsvec_jac = bsxfun(@times, sqrt(2*(2*[0:K]' + N + p/2 + 1)), cfsvec_jac);
v = prolate_JacobiP_ex(a,b,cfsvec_jac,yy) ;
v=bsxfun(@times, xx(:).^N , v);
end
| github | ngcthuong/CSNet-master | Cal_PSNRSSIM.m | .m | CSNet-master/utilities/Cal_PSNRSSIM.m | 6,250 | utf_8 | 891b4e57ebcd097592850eecf97f150e |
function [psnr_cur, ssim_cur] = Cal_PSNRSSIM(A,B,row,col)
[n,m,ch]=size(B);
A = A(row+1:n-row,col+1:m-col,:);
B = B(row+1:n-row,col+1:m-col,:);
A=double(A); % Ground-truth
B=double(B); %
e=A(:)-B(:);
mse=mean(e.^2);
psnr_cur=10*log10(255^2/mse);
if ch==1
[ssim_cur, ~] = ssim_index(A, B);
else
ssim_cur = -1;
end
function [mssim, ssim_map] = ssim_index(img1, img2, K, window, L)
%========================================================================
%SSIM Index, Version 1.0
%Copyright(c) 2003 Zhou Wang
%All Rights Reserved.
%
%The author is with Howard Hughes Medical Institute, and Laboratory
%for Computational Vision at Center for Neural Science and Courant
%Institute of Mathematical Sciences, New York University.
%
%----------------------------------------------------------------------
%Permission to use, copy, or modify this software and its documentation
%for educational and research purposes only and without fee is hereby
%granted, provided that this copyright notice and the original authors'
%names appear on all copies and supporting documentation. This program
%shall not be used, rewritten, or adapted as the basis of a commercial
%software or hardware product without first obtaining permission of the
%authors. The authors make no representations about the suitability of
%this software for any purpose. It is provided "as is" without express
%or implied warranty.
%----------------------------------------------------------------------
%
%This is an implementation of the algorithm for calculating the
%Structural SIMilarity (SSIM) index between two images. Please refer
%to the following paper:
%
%Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, "Image
%quality assessment: From error measurement to structural similarity"
%IEEE Transactions on Image Processing, vol. 13, no. 1, Jan. 2004.
%
%Kindly report any suggestions or corrections to [email protected]
%
%----------------------------------------------------------------------
%
%Input : (1) img1: the first image being compared
% (2) img2: the second image being compared
% (3) K: constants in the SSIM index formula (see the above
% reference). default value: K = [0.01 0.03]
% (4) window: local window for statistics (see the above
% reference). default window is Gaussian given by
% window = fspecial('gaussian', 11, 1.5);
% (5) L: dynamic range of the images. default: L = 255
%
%Output: (1) mssim: the mean SSIM index value between 2 images.
% If one of the images being compared is regarded as
% perfect quality, then mssim can be considered as the
% quality measure of the other image.
% If img1 = img2, then mssim = 1.
% (2) ssim_map: the SSIM index map of the test image. The map
% has a smaller size than the input images. The actual size:
% size(img1) - size(window) + 1.
%
%Default Usage:
% Given 2 test images img1 and img2, whose dynamic range is 0-255
%
% [mssim ssim_map] = ssim_index(img1, img2);
%
%Advanced Usage:
% User defined parameters. For example
%
% K = [0.05 0.05];
% window = ones(8);
% L = 100;
% [mssim ssim_map] = ssim_index(img1, img2, K, window, L);
%
%See the results:
%
% mssim %Gives the mssim value
% imshow(max(0, ssim_map).^4) %Shows the SSIM index map
%
%========================================================================
if (nargin < 2 || nargin > 5)
mssim = -Inf;
ssim_map = -Inf;
return;
end
if ~isequal(size(img1), size(img2))
mssim = -Inf;
ssim_map = -Inf;
return;
end
[M N] = size(img1);
if (nargin == 2)
if ((M < 11) || (N < 11))
mssim = -Inf;
ssim_map = -Inf;
return
end
window = fspecial('gaussian', 11, 1.5); %
K(1) = 0.01; % default settings
K(2) = 0.03; %
L = 255; %
end
if (nargin == 3)
if ((M < 11) || (N < 11))
mssim = -Inf;
ssim_map = -Inf;
return
end
window = fspecial('gaussian', 11, 1.5);
L = 255;
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
if (nargin == 4)
[H W] = size(window);
if ((H*W) < 4 || (H > M) || (W > N))
mssim = -Inf;
ssim_map = -Inf;
return
end
L = 255;
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
if (nargin == 5)
[H W] = size(window);
if ((H*W) < 4 || (H > M) || (W > N))
mssim = -Inf;
ssim_map = -Inf;
return
end
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
C1 = (K(1)*L)^2;
C2 = (K(2)*L)^2;
window = window/sum(sum(window));
img1 = double(img1);
img2 = double(img2);
mu1 = filter2(window, img1, 'valid');
mu2 = filter2(window, img2, 'valid');
mu1_sq = mu1.*mu1;
mu2_sq = mu2.*mu2;
mu1_mu2 = mu1.*mu2;
sigma1_sq = filter2(window, img1.*img1, 'valid') - mu1_sq;
sigma2_sq = filter2(window, img2.*img2, 'valid') - mu2_sq;
sigma12 = filter2(window, img1.*img2, 'valid') - mu1_mu2;
if (C1 > 0 & C2 > 0)
ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2));
else
numerator1 = 2*mu1_mu2 + C1;
numerator2 = 2*sigma12 + C2;
denominator1 = mu1_sq + mu2_sq + C1;
denominator2 = sigma1_sq + sigma2_sq + C2;
ssim_map = ones(size(mu1));
index = (denominator1.*denominator2 > 0);
ssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index));
index = (denominator1 ~= 0) & (denominator2 == 0);
ssim_map(index) = numerator1(index)./denominator1(index);
end
mssim = mean2(ssim_map);
return
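% --- Usage sketch (not part of the original file; the degraded image below is
% a synthetic assumption for illustration) ---
%   ref = double(imread('cameraman.tif'));
%   rec = ref + 5 * randn(size(ref));                       % hypothetical reconstruction
%   [psnr_val, ssim_val] = Cal_PSNRSSIM(ref, rec, 0, 0);    % no border cropping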
| github | ngcthuong/CSNet-master | test_network_v02.m | .m | CSNet-master/utilities/test_network_v02.m | 3,766 | utf_8 | 6abb3286637df8403f7e640f9b53db51 |
function net = CSNet_init
global featureSize noLayer blkSize subRate;
test = 1;
if test == 1
featureSize = 64;
noLayer = 7;
blkSize = 32;
subRate = 0.1;
end
noMeas = round(subRate * blkSize ^2);
%%% 17 layers
b_min = 0.025;
lr11 = [1 1];
lr10 = [1 0];
lr00 = [0 0];
weightDecay = [1 0];
meanvar = [zeros(featureSize,1,'single'), 0.01*ones(featureSize,1,'single')];
% Define network
net.layers = {} ;
%% 1. Sampling layer - for gray image
% Sampling network, with kernel size of blkSize x blkSize, do no use
% bias --> initialized as zero and learn rate = 0.
% Load sensing matrix of size blkSizexBlkSize
trial = 1;
fileName = ['SensingMtxs\BlkSize' num2str(blkSize) '_trial' num2str(trial) '.mat' ];
if ~exist(fileName, 'file')
Phi_Full = orth(rand(blkSize^2, blkSize^2));
save(fileName, 'Phi_Full');
else
load(fileName);
end
Phi = single(Phi_Full(1:noMeas, :)); % Phi is needed below regardless of which branch ran
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(blkSize, blkSize, 1, noMeas,'single'), zeros(featureSize,1,'single')}}, ...
'stride', blkSize, ...
'pad', 0, ...
'dilate',1, ...
'learningRate',lr00, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
% net.layers{end+1} = struct('type', 'relu','leak',0) ; -- do not use relu
% assign the sampling matrix
W = zeros(blkSize, blkSize, 1, noMeas);
for i = 1:1:noMeas
W(:, :, 1, i) = reshape(Phi(i, :), blkSize, blkSize);
end
net.layers{1}.weights(1) = {single(W)};
% im = double(imread('cameraman.tif'));
%% 2. Initial reconstruction layer with 1x1 Convolution
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(1, 1, noMeas, blkSize*blkSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'pad', 0, ...
'dilate',1, ...
'learningRate',lr11, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
W2 = zeros(1, 1, noMeas, blkSize*blkSize);
PhiInv = pinv(Phi);
for i = 1:1:noMeas
W2(:, :, i, :) = PhiInv(:, i);
end
net.layers{2}.weights(1) = {single(W2)};
%% 3. Reshape and concatenate to make recon. image
net.layers{end+1} = struct('type', 'reshapeconcat');
%% 4. Reconstruction network - DnCNN
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,1,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'pad', 1, ...
'dilate',1, ...
'learningRate',lr11, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
for i = 1:1:noLayer - 2
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'learningRate',lr10, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'bnorm', ...
'weights', {{clipping(sqrt(2/(9*featureSize))*randn(featureSize,1,'single'),b_min), zeros(featureSize,1,'single'),meanvar}}, ...
'learningRate', [1 1 1], ...
'weightDecay', [0 0], ...
'opts', {{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
end
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,1,'single'), zeros(1,1,'single')}}, ...
'stride', 1, ...
'learningRate',lr11, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'loss') ; % make sure the new 'vl_nnloss.m' is in the same folder.
% Fill in default values
net = vl_simplenn_tidy(net);
function A = clipping(A,b)
A(A>=0&A<b) = b;
A(A<0&A>-b) = -b;
|
github
|
ngcthuong/CSNet-master
|
Cal_PSNRSSIM.m
|
.m
|
CSNet-master/Data/utilities/Cal_PSNRSSIM.m
| 6,250 |
utf_8
|
891b4e57ebcd097592850eecf97f150e
|
function [psnr_cur, ssim_cur] = Cal_PSNRSSIM(A,B,row,col)
[n,m,ch]=size(B);
A = A(row+1:n-row,col+1:m-col,:);
B = B(row+1:n-row,col+1:m-col,:);
A=double(A); % Ground-truth
B=double(B); %
e=A(:)-B(:);
mse=mean(e.^2);
psnr_cur=10*log10(255^2/mse);
if ch==1
[ssim_cur, ~] = ssim_index(A, B);
else
ssim_cur = -1;
end
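% Illustrative usage sketch (hypothetical images in [0,255]; the last two arguments
% crop 'row'/'col' pixels from the image borders before computing the metrics):
%   img_gt  = double(imread('cameraman.tif'));
%   img_rec = img_gt + 5*randn(size(img_gt));   % a noisy "reconstruction"
%   [psnr_val, ssim_val] = Cal_PSNRSSIM(img_gt, img_rec, 0, 0);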
function [mssim, ssim_map] = ssim_index(img1, img2, K, window, L)
%========================================================================
%SSIM Index, Version 1.0
%Copyright(c) 2003 Zhou Wang
%All Rights Reserved.
%
%The author is with Howard Hughes Medical Institute, and Laboratory
%for Computational Vision at Center for Neural Science and Courant
%Institute of Mathematical Sciences, New York University.
%
%----------------------------------------------------------------------
%Permission to use, copy, or modify this software and its documentation
%for educational and research purposes only and without fee is hereby
%granted, provided that this copyright notice and the original authors'
%names appear on all copies and supporting documentation. This program
%shall not be used, rewritten, or adapted as the basis of a commercial
%software or hardware product without first obtaining permission of the
%authors. The authors make no representations about the suitability of
%this software for any purpose. It is provided "as is" without express
%or implied warranty.
%----------------------------------------------------------------------
%
%This is an implementation of the algorithm for calculating the
%Structural SIMilarity (SSIM) index between two images. Please refer
%to the following paper:
%
%Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, "Image
%quality assessment: From error measurement to structural similarity"
%IEEE Transactions on Image Processing, vol. 13, no. 1, Jan. 2004.
%
%Kindly report any suggestions or corrections to [email protected]
%
%----------------------------------------------------------------------
%
%Input : (1) img1: the first image being compared
% (2) img2: the second image being compared
% (3) K: constants in the SSIM index formula (see the above
% reference). default value: K = [0.01 0.03]
% (4) window: local window for statistics (see the above
% reference). default window is Gaussian given by
% window = fspecial('gaussian', 11, 1.5);
% (5) L: dynamic range of the images. default: L = 255
%
%Output: (1) mssim: the mean SSIM index value between 2 images.
% If one of the images being compared is regarded as
% perfect quality, then mssim can be considered as the
% quality measure of the other image.
% If img1 = img2, then mssim = 1.
% (2) ssim_map: the SSIM index map of the test image. The map
% has a smaller size than the input images. The actual size:
% size(img1) - size(window) + 1.
%
%Default Usage:
% Given 2 test images img1 and img2, whose dynamic range is 0-255
%
% [mssim ssim_map] = ssim_index(img1, img2);
%
%Advanced Usage:
% User defined parameters. For example
%
% K = [0.05 0.05];
% window = ones(8);
% L = 100;
% [mssim ssim_map] = ssim_index(img1, img2, K, window, L);
%
%See the results:
%
% mssim %Gives the mssim value
% imshow(max(0, ssim_map).^4) %Shows the SSIM index map
%
%========================================================================
if (nargin < 2 || nargin > 5)
mssim = -Inf;
ssim_map = -Inf;
return;
end
if (size(img1) ~= size(img2))
mssim = -Inf;
ssim_map = -Inf;
return;
end
[M N] = size(img1);
if (nargin == 2)
if ((M < 11) || (N < 11))
mssim = -Inf;
ssim_map = -Inf;
return
end
window = fspecial('gaussian', 11, 1.5); %
K(1) = 0.01; % default settings
K(2) = 0.03; %
L = 255; %
end
if (nargin == 3)
if ((M < 11) || (N < 11))
mssim = -Inf;
ssim_map = -Inf;
return
end
window = fspecial('gaussian', 11, 1.5);
L = 255;
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
if (nargin == 4)
[H W] = size(window);
if ((H*W) < 4 || (H > M) || (W > N))
mssim = -Inf;
ssim_map = -Inf;
return
end
L = 255;
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
if (nargin == 5)
[H W] = size(window);
if ((H*W) < 4 || (H > M) || (W > N))
mssim = -Inf;
ssim_map = -Inf;
return
end
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
C1 = (K(1)*L)^2;
C2 = (K(2)*L)^2;
window = window/sum(sum(window));
img1 = double(img1);
img2 = double(img2);
mu1 = filter2(window, img1, 'valid');
mu2 = filter2(window, img2, 'valid');
mu1_sq = mu1.*mu1;
mu2_sq = mu2.*mu2;
mu1_mu2 = mu1.*mu2;
sigma1_sq = filter2(window, img1.*img1, 'valid') - mu1_sq;
sigma2_sq = filter2(window, img2.*img2, 'valid') - mu2_sq;
sigma12 = filter2(window, img1.*img2, 'valid') - mu1_mu2;
if (C1 > 0 & C2 > 0)
ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2));
else
numerator1 = 2*mu1_mu2 + C1;
numerator2 = 2*sigma12 + C2;
denominator1 = mu1_sq + mu2_sq + C1;
denominator2 = sigma1_sq + sigma2_sq + C2;
ssim_map = ones(size(mu1));
index = (denominator1.*denominator2 > 0);
ssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index));
index = (denominator1 ~= 0) & (denominator2 == 0);
ssim_map(index) = numerator1(index)./denominator1(index);
end
mssim = mean2(ssim_map);
return
|
github
|
ngcthuong/CSNet-master
|
CSNet_init.m
|
.m
|
CSNet-master/TrainingCode/CSNet_v03/CSNet_init.m
| 3,597 |
utf_8
|
f6f53c2bb1c1455b8cf8497263f2e338
|
function net = CSNet_init
global featureSize noLayer blkSize subRate isLearnMtx;
test = 0;
if test == 1
featureSize = 64;
noLayer = 7;
blkSize = 32;
subRate = 0.1;
end
noMeas = round(subRate * blkSize ^2);
%%% 17 layers
b_min = 0.025;
lr11 = [1 1];
lr10 = [1 0];
lr00 = [0 0];
weightDecay = [1 0];
meanvar = [zeros(featureSize,1,'single'), 0.01*ones(featureSize,1,'single')];
% Define network
net.layers = {} ;
%% 1. Sampling layer - for gray image
% Sampling network, with kernel size of blkSize x blkSize; do not use
% bias --> initialized as zero and learning rate = 0.
% Load sensing matrix of size blkSize x blkSize
trial = 1;
fileName = ['SensingMtxs\BlkSize' num2str(blkSize) '_trial' num2str(trial) '.mat' ];
if ~exist(fileName, 'file')
    Phi_Full = orth(rand(blkSize^2, blkSize^2));
    save(fileName, 'Phi_Full');
else
    load(fileName);
end
Phi = single(Phi_Full(1:noMeas, :)); % take the first noMeas rows whether Phi_Full was just generated or loaded
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(blkSize, blkSize, 1, noMeas,'single'), zeros(noMeas,1,'single')}}, ...
'stride', blkSize, ...
'pad', 0, ...
'dilate',1, ...
'learningRate', isLearnMtx, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
% net.layers{end+1} = struct('type', 'relu','leak',0) ; -- do not use relu
% assign the sampling matrix
W = zeros(blkSize, blkSize, 1, noMeas);
for i = 1:1:noMeas
W(:, :, 1, i) = reshape(Phi(i, :), blkSize, blkSize);
end
net.layers{1}.weights(1) = {single(W)};
% im = double(imread('cameraman.tif'));
%% 2. Initial reconstruction layer with 1x1 Convolution
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(1, 1, noMeas, blkSize*blkSize,'single'), zeros(blkSize*blkSize,1,'single')}}, ...
'stride', 1, ...
'pad', 0, ...
'dilate',1, ...
'learningRate',lr10, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
W2 = zeros(1, 1, noMeas, blkSize*blkSize);
PhiInv = pinv(Phi);
for i = 1:1:noMeas
W2(:, :, i, :) = PhiInv(:, i);
end
net.layers{2}.weights(1) = {single(W2)};
%% 3. Reshape and concatenate to make the reconstructed image
%net.layers{end+1} = struct('type', 'reshapeconcat');
net.layers{end+1} = struct('type', 'bcs_init_rec');
net.layers{end}.dims = [blkSize, blkSize];
%% 4. Reconstruction network - DnCNN
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,1,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'pad', 1, ...
'dilate',1, ...
'learningRate',lr11, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
for i = 1:1:noLayer - 2
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'learningRate', lr10, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
end
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,1,'single'), zeros(1,1,'single')}}, ...
'stride', 1, ...
'learningRate', lr11, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'loss') ; % make sure the new 'vl_nnloss.m' is in the same folder.
% Fill in default values
net = vl_simplenn_tidy(net);
function A = clipping(A,b)
A(A>=0&A<b) = b;
A(A<0&A>-b) = -b;
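% Illustrative usage sketch (assumes MatConvNet is on the path; values are hypothetical):
%   global featureSize noLayer blkSize subRate isLearnMtx;
%   featureSize = 64; noLayer = 7; blkSize = 32; subRate = 0.1;
%   isLearnMtx = [0 0];     % keep the sensing matrix fixed ([1 1] would also learn it)
%   net = CSNet_init;
%   vl_simplenn_display(net);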
|
github
|
ngcthuong/CSNet-master
|
CSNet_train.m
|
.m
|
CSNet-master/TrainingCode/CSNet_v03/CSNet_train.m
| 12,946 |
utf_8
|
dbf0bbf2dc7f04221f1c4cec58d49787
|
function [net, state] = CSNet_train(net, varargin)
% The function automatically restarts after each training epoch by
% checkpointing.
%
% The function supports training on CPU or on one or more GPUs
% (specify the list of GPU IDs in the `gpus` option).
% Copyright (C) 2014-16 Andrea Vedaldi.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).
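%
% Illustrative usage sketch (hypothetical option values; 'imdbDir' must point to a
% .mat file containing the fields 'inputs' and 'set' expected by getSimpleNNBatch below):
%   net = CSNet_init;
%   [net, state] = CSNet_train(net, 'gpus', [], 'batchSize', 64, ...
%       'learningRate', [1e-3*ones(1,30), 1e-4*ones(1,20)], ...
%       'modelName', 'CSNet_r01', 'imdbDir', 'imdb_train.mat');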
%%%-------------------------------------------------------------------------
%%% solvers: SGD(default) and Adam with(default)/without gradientClipping
%%%-------------------------------------------------------------------------
%%% solver: Adam
opts.solver = 'Adam';
opts.beta1 = 0.9;
opts.beta2 = 0.999;
opts.alpha = 0.01;
opts.epsilon = 1e-8;
%%% solver: SGD
% opts.solver = 'SGD';
opts.learningRate = 0.01;
opts.weightDecay = 0.001;
opts.momentum = 0.9 ;
%%% GradientClipping
opts.gradientClipping = false;
opts.theta = 0.005;
%%% specific parameter for Bnorm
opts.bnormLearningRate = 0;
%%%-------------------------------------------------------------------------
%%% setting for simplenn
%%%-------------------------------------------------------------------------
opts.conserveMemory = true;
opts.mode = 'normal';
opts.cudnn = true ;
opts.backPropDepth = +inf ;
opts.skipForward = false;
opts.numSubBatches = 1;
%%%-------------------------------------------------------------------------
%%% setting for model
%%%-------------------------------------------------------------------------
opts.batchSize = 128 ;
opts.gpus = [];
opts.numEpochs = 300 ;
opts.modelName = 'model';
opts.expDir = fullfile('data',opts.modelName) ;
opts.numberImdb = 1;
opts.imdbDir = opts.expDir;
%%%-------------------------------------------------------------------------
%%% update settings
%%%-------------------------------------------------------------------------
opts = vl_argparse(opts, varargin);
opts.numEpochs = numel(opts.learningRate);
if ~exist(opts.expDir, 'dir'), mkdir(opts.expDir) ; end
%%%-------------------------------------------------------------------------
%%% Initialization
%%%-------------------------------------------------------------------------
net = vl_simplenn_tidy(net); %%% fill in some eventually missing values
net.layers{end-1}.precious = 1;
vl_simplenn_display(net, 'batchSize', opts.batchSize) ;
state.getBatch = getBatch ;
%%%-------------------------------------------------------------------------
%%% Train and Test
%%%-------------------------------------------------------------------------
modelPath = @(ep) fullfile(opts.expDir, sprintf([opts.modelName,'-epoch-%d.mat'], ep));
start = findLastCheckpoint(opts.expDir,opts.modelName) ;
if start >= 1
fprintf('%s: resuming by loading epoch %d\n', mfilename, start) ;
load(modelPath(start), 'net') ;
net = vl_simplenn_tidy(net) ;
end
%%% load training data
opts.imdbPath = fullfile(opts.imdbDir);
imdb = load(opts.imdbPath) ;
opts.train = find(imdb.set==1);
for epoch = start+1 : opts.numEpochs
%%% Train for one epoch.
state.epoch = epoch ;
state.learningRate = opts.learningRate(min(epoch, numel(opts.learningRate)));
opts.thetaCurrent = opts.theta(min(epoch, numel(opts.theta)));
if numel(opts.gpus) == 1
net = vl_simplenn_move(net, 'gpu') ;
end
state.train = opts.train(randperm(numel(opts.train))) ; %%% shuffle
[net, state] = process_epoch(net, state, imdb, opts, 'train');
net.layers{end}.class =[];
net = vl_simplenn_move(net, 'cpu');
%%% save current model
save(modelPath(epoch), 'net')
end
%%%-------------------------------------------------------------------------
function [net, state] = process_epoch(net, state, imdb, opts, mode)
%%%-------------------------------------------------------------------------
if strcmp(mode,'train')
switch opts.solver
case 'SGD' %%% solver: SGD
for i = 1:numel(net.layers)
if isfield(net.layers{i}, 'weights')
for j = 1:numel(net.layers{i}.weights)
state.layers{i}.momentum{j} = 0;
end
end
end
case 'Adam' %%% solver: Adam
for i = 1:numel(net.layers)
if isfield(net.layers{i}, 'weights')
for j = 1:numel(net.layers{i}.weights)
state.layers{i}.t{j} = 0;
state.layers{i}.m{j} = 0;
state.layers{i}.v{j} = 0;
end
end
end
end
end
subset = state.(mode) ;
num = 0 ;
res = [];
for t=1:opts.batchSize:numel(subset)
for s=1:opts.numSubBatches
% get this image batch
batchStart = t + (s-1);
batchEnd = min(t+opts.batchSize-1, numel(subset)) ;
batch = subset(batchStart : opts.numSubBatches : batchEnd) ;
num = num + numel(batch) ;
if numel(batch) == 0, continue ; end
[inputs,labels] = state.getBatch(imdb, batch) ;
if numel(opts.gpus) == 1
inputs = gpuArray(inputs);
labels = gpuArray(labels);
end
if strcmp(mode, 'train')
dzdy = single(1);
evalMode = 'normal';%%% forward and backward (Gradients)
else
dzdy = [] ;
evalMode = 'test'; %%% forward only
end
net.layers{end}.class = labels ;
res = vl_simplenn(net, inputs, dzdy, res, ...
'accumulate', s ~= 1, ...
'mode', evalMode, ...
'conserveMemory', opts.conserveMemory, ...
'backPropDepth', opts.backPropDepth, ...
'cudnn', opts.cudnn) ;
end
if strcmp(mode, 'train')
[state, net] = params_updates(state, net, res, opts, opts.batchSize) ;
end
lossL2 = gather(res(end).x) ;
%%%--------add your code here------------------------
%%%--------------------------------------------------
fprintf('%s: epoch %02d dataset %02d: %3d/%3d:', mode, state.epoch, mod(state.epoch,opts.numberImdb), ...
fix((t-1)/opts.batchSize)+1, ceil(numel(subset)/opts.batchSize)) ;
fprintf('error: %f \n', lossL2) ;
end
%%%-------------------------------------------------------------------------
function [state, net] = params_updates(state, net, res, opts, batchSize)
%%%-------------------------------------------------------------------------
switch opts.solver
case 'SGD' %%% solver: SGD
for l=numel(net.layers):-1:1
for j=1:numel(res(l).dzdw)
if j == 3 && strcmp(net.layers{l}.type, 'bnorm')
%%% special case for learning bnorm moments
thisLR = net.layers{l}.learningRate(j) - opts.bnormLearningRate;
net.layers{l}.weights{j} = vl_taccum(...
1 - thisLR, ...
net.layers{l}.weights{j}, ...
thisLR / batchSize, ...
res(l).dzdw{j}) ;
else
thisDecay = opts.weightDecay * net.layers{l}.weightDecay(j);
thisLR = state.learningRate * net.layers{l}.learningRate(j);
if opts.gradientClipping
theta = opts.thetaCurrent/thisLR;
state.layers{l}.momentum{j} = opts.momentum * state.layers{l}.momentum{j} ...
- thisDecay * net.layers{l}.weights{j} ...
- (1 / batchSize) * gradientClipping(res(l).dzdw{j},theta) ;
net.layers{l}.weights{j} = net.layers{l}.weights{j} + ...
thisLR * state.layers{l}.momentum{j} ;
else
state.layers{l}.momentum{j} = opts.momentum * state.layers{l}.momentum{j} ...
- thisDecay * net.layers{l}.weights{j} ...
- (1 / batchSize) * res(l).dzdw{j} ;
net.layers{l}.weights{j} = net.layers{l}.weights{j} + ...
thisLR * state.layers{l}.momentum{j} ;
end
end
end
end
case 'Adam' %%% solver: Adam
for l=numel(net.layers):-1:1
for j=1:numel(res(l).dzdw)
if j == 3 && strcmp(net.layers{l}.type, 'bnorm')
%%% special case for learning bnorm moments
thisLR = net.layers{l}.learningRate(j) - opts.bnormLearningRate;
net.layers{l}.weights{j} = vl_taccum(...
1 - thisLR, ...
net.layers{l}.weights{j}, ...
thisLR / batchSize, ...
res(l).dzdw{j}) ;
else
thisLR = state.learningRate * net.layers{l}.learningRate(j);
state.layers{l}.t{j} = state.layers{l}.t{j} + 1;
t = state.layers{l}.t{j};
alpha = thisLR;
lr = alpha * sqrt(1 - opts.beta2^t) / (1 - opts.beta1^t);
state.layers{l}.m{j} = state.layers{l}.m{j} + (1 - opts.beta1) .* (res(l).dzdw{j} - state.layers{l}.m{j});
state.layers{l}.v{j} = state.layers{l}.v{j} + (1 - opts.beta2) .* (res(l).dzdw{j} .* res(l).dzdw{j} - state.layers{l}.v{j});
if opts.gradientClipping
theta = opts.thetaCurrent/lr;
net.layers{l}.weights{j} = net.layers{l}.weights{j} - lr * gradientClipping(state.layers{l}.m{j} ./ (sqrt(state.layers{l}.v{j}) + opts.epsilon),theta);
else
net.layers{l}.weights{j} = net.layers{l}.weights{j} - lr * state.layers{l}.m{j} ./ (sqrt(state.layers{l}.v{j}) + opts.epsilon);
end
% net.layers{l}.weights{j} = weightClipping(net.layers{l}.weights{j},2); % gradually clip the weights
end
end
end
end
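% Summary of the Adam step as implemented above (per weight array w with gradient g):
%   m  <- m + (1 - beta1) .* (g - m)            running mean of the gradient
%   v  <- v + (1 - beta2) .* (g.^2 - v)         running mean of the squared gradient
%   lr  = learningRate * sqrt(1 - beta2^t) / (1 - beta1^t)
%   w  <- w - lr * m ./ (sqrt(v) + epsilon)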
%%%-------------------------------------------------------------------------
function epoch = findLastCheckpoint(modelDir,modelName)
%%%-------------------------------------------------------------------------
list = dir(fullfile(modelDir, [modelName,'-epoch-*.mat'])) ;
tokens = regexp({list.name}, [modelName,'-epoch-([\d]+).mat'], 'tokens') ;
epoch = cellfun(@(x) sscanf(x{1}{1}, '%d'), tokens) ;
epoch = max([epoch 0]) ;
%%%-------------------------------------------------------------------------
function A = gradientClipping(A, theta)
%%%-------------------------------------------------------------------------
A(A>theta) = theta;
A(A<-theta) = -theta;
%%%-------------------------------------------------------------------------
function A = weightClipping(A, theta)
%%%-------------------------------------------------------------------------
A(A>theta) = A(A>theta) -0.0005;
A(A<-theta) = A(A<-theta)+0.0005;
%%%-------------------------------------------------------------------------
function fn = getBatch
%%%-------------------------------------------------------------------------
fn = @(x,y) getSimpleNNBatch(x,y);
%%%-------------------------------------------------------------------------
function [inputs,labels] = getSimpleNNBatch(imdb, batch)
%%%-------------------------------------------------------------------------
inputs = imdb.inputs(:,:,:,batch);
rng('shuffle');
mode = randperm(8);
inputs = data_augmentation_CSNet(inputs, mode(1));
labels = inputs;
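% Note: data_augmentation_CSNet below only implements modes 1-4 (identity, up-down
% flip, left-right flip, and both flips); modes 5-8 drawn by randperm(8) fall through
% all of its branches and return the batch unchanged.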
function image = data_augmentation_CSNet(image, mode)
if mode == 1
return;
end
if mode == 2 % flipped
image = flipud(image);
return;
end
if mode == 3 % flipped left-right
image = fliplr(image);
return;
end
if mode == 4 % flipped left-right & up-down
image = fliplr(image);
image = flipud(image);
return;
end
function image = data_augmentation(image, mode)
if mode == 1
return;
end
if mode == 2 % flipped
image = flipud(image);
return;
end
if mode == 3 % rotation 90
image = rot90(image,1);
return;
end
if mode == 4 % rotation 90 & flipped
image = rot90(image,1);
image = flipud(image);
return;
end
if mode == 5 % rotation 180
image = rot90(image,2);
return;
end
if mode == 6 % rotation 180 & flipped
image = rot90(image,2);
image = flipud(image);
return;
end
if mode == 7 % rotation 270
image = rot90(image,3);
return;
end
if mode == 8 % rotation 270 & flipped
image = rot90(image,3);
image = flipud(image);
return;
end
|
github
|
ngcthuong/CSNet-master
|
Cal_PSNRSSIM.m
|
.m
|
CSNet-master/TrainingCode/CSNet_v03/utilities/Cal_PSNRSSIM.m
| 6,250 |
utf_8
|
891b4e57ebcd097592850eecf97f150e
|
function [psnr_cur, ssim_cur] = Cal_PSNRSSIM(A,B,row,col)
[n,m,ch]=size(B);
A = A(row+1:n-row,col+1:m-col,:);
B = B(row+1:n-row,col+1:m-col,:);
A=double(A); % Ground-truth
B=double(B); %
e=A(:)-B(:);
mse=mean(e.^2);
psnr_cur=10*log10(255^2/mse);
if ch==1
[ssim_cur, ~] = ssim_index(A, B);
else
ssim_cur = -1;
end
function [mssim, ssim_map] = ssim_index(img1, img2, K, window, L)
%========================================================================
%SSIM Index, Version 1.0
%Copyright(c) 2003 Zhou Wang
%All Rights Reserved.
%
%The author is with Howard Hughes Medical Institute, and Laboratory
%for Computational Vision at Center for Neural Science and Courant
%Institute of Mathematical Sciences, New York University.
%
%----------------------------------------------------------------------
%Permission to use, copy, or modify this software and its documentation
%for educational and research purposes only and without fee is hereby
%granted, provided that this copyright notice and the original authors'
%names appear on all copies and supporting documentation. This program
%shall not be used, rewritten, or adapted as the basis of a commercial
%software or hardware product without first obtaining permission of the
%authors. The authors make no representations about the suitability of
%this software for any purpose. It is provided "as is" without express
%or implied warranty.
%----------------------------------------------------------------------
%
%This is an implementation of the algorithm for calculating the
%Structural SIMilarity (SSIM) index between two images. Please refer
%to the following paper:
%
%Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, "Image
%quality assessment: From error measurement to structural similarity"
%IEEE Transactions on Image Processing, vol. 13, no. 1, Jan. 2004.
%
%Kindly report any suggestions or corrections to [email protected]
%
%----------------------------------------------------------------------
%
%Input : (1) img1: the first image being compared
% (2) img2: the second image being compared
% (3) K: constants in the SSIM index formula (see the above
% reference). default value: K = [0.01 0.03]
% (4) window: local window for statistics (see the above
% reference). default window is Gaussian given by
% window = fspecial('gaussian', 11, 1.5);
% (5) L: dynamic range of the images. default: L = 255
%
%Output: (1) mssim: the mean SSIM index value between 2 images.
% If one of the images being compared is regarded as
% perfect quality, then mssim can be considered as the
% quality measure of the other image.
% If img1 = img2, then mssim = 1.
% (2) ssim_map: the SSIM index map of the test image. The map
% has a smaller size than the input images. The actual size:
% size(img1) - size(window) + 1.
%
%Default Usage:
% Given 2 test images img1 and img2, whose dynamic range is 0-255
%
% [mssim ssim_map] = ssim_index(img1, img2);
%
%Advanced Usage:
% User defined parameters. For example
%
% K = [0.05 0.05];
% window = ones(8);
% L = 100;
% [mssim ssim_map] = ssim_index(img1, img2, K, window, L);
%
%See the results:
%
% mssim %Gives the mssim value
% imshow(max(0, ssim_map).^4) %Shows the SSIM index map
%
%========================================================================
if (nargin < 2 || nargin > 5)
mssim = -Inf;
ssim_map = -Inf;
return;
end
if (size(img1) ~= size(img2))
mssim = -Inf;
ssim_map = -Inf;
return;
end
[M N] = size(img1);
if (nargin == 2)
if ((M < 11) || (N < 11))
mssim = -Inf;
ssim_map = -Inf;
return
end
window = fspecial('gaussian', 11, 1.5); %
K(1) = 0.01; % default settings
K(2) = 0.03; %
L = 255; %
end
if (nargin == 3)
if ((M < 11) || (N < 11))
mssim = -Inf;
ssim_map = -Inf;
return
end
window = fspecial('gaussian', 11, 1.5);
L = 255;
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
if (nargin == 4)
[H W] = size(window);
if ((H*W) < 4 || (H > M) || (W > N))
mssim = -Inf;
ssim_map = -Inf;
return
end
L = 255;
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
if (nargin == 5)
[H W] = size(window);
if ((H*W) < 4 || (H > M) || (W > N))
mssim = -Inf;
ssim_map = -Inf;
return
end
if (length(K) == 2)
if (K(1) < 0 || K(2) < 0)
mssim = -Inf;
ssim_map = -Inf;
return;
end
else
mssim = -Inf;
ssim_map = -Inf;
return;
end
end
C1 = (K(1)*L)^2;
C2 = (K(2)*L)^2;
window = window/sum(sum(window));
img1 = double(img1);
img2 = double(img2);
mu1 = filter2(window, img1, 'valid');
mu2 = filter2(window, img2, 'valid');
mu1_sq = mu1.*mu1;
mu2_sq = mu2.*mu2;
mu1_mu2 = mu1.*mu2;
sigma1_sq = filter2(window, img1.*img1, 'valid') - mu1_sq;
sigma2_sq = filter2(window, img2.*img2, 'valid') - mu2_sq;
sigma12 = filter2(window, img1.*img2, 'valid') - mu1_mu2;
if (C1 > 0 & C2 > 0)
ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2));
else
numerator1 = 2*mu1_mu2 + C1;
numerator2 = 2*sigma12 + C2;
denominator1 = mu1_sq + mu2_sq + C1;
denominator2 = sigma1_sq + sigma2_sq + C2;
ssim_map = ones(size(mu1));
index = (denominator1.*denominator2 > 0);
ssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index));
index = (denominator1 ~= 0) & (denominator2 == 0);
ssim_map(index) = numerator1(index)./denominator1(index);
end
mssim = mean2(ssim_map);
return
|
github
|
ngcthuong/CSNet-master
|
test_network_v02.m
|
.m
|
CSNet-master/TrainingCode/CSNet_v03/utilities/test_network_v02.m
| 3,766 |
utf_8
|
6abb3286637df8403f7e640f9b53db51
|
function net = CSNet_init
global featureSize noLayer blkSize subRate;
test = 1;
if test == 1
featureSize = 64;
noLayer = 7;
blkSize = 32;
subRate = 0.1;
end
noMeas = round(subRate * blkSize ^2);
%%% 17 layers
b_min = 0.025;
lr11 = [1 1];
lr10 = [1 0];
lr00 = [0 0];
weightDecay = [1 0];
meanvar = [zeros(featureSize,1,'single'), 0.01*ones(featureSize,1,'single')];
% Define network
net.layers = {} ;
%% 1. Sampling layer - for gray image
% Sampling network, with kernel size of blkSize x blkSize; do not use
% bias --> initialized as zero and learning rate = 0.
% Load sensing matrix of size blkSize x blkSize
trial = 1;
fileName = ['SensingMtxs\BlkSize' num2str(blkSize) '_trial' num2str(trial) '.mat' ];
if ~exist(fileName, 'file')
    Phi_Full = orth(rand(blkSize^2, blkSize^2));
    save(fileName, 'Phi_Full');
else
    load(fileName);
end
Phi = single(Phi_Full(1:noMeas, :)); % take the first noMeas rows whether Phi_Full was just generated or loaded
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(blkSize, blkSize, 1, noMeas,'single'), zeros(noMeas,1,'single')}}, ...
'stride', blkSize, ...
'pad', 0, ...
'dilate',1, ...
'learningRate',lr00, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
% net.layers{end+1} = struct('type', 'relu','leak',0) ; -- do not use relu
% assign the sampling matrix
W = zeros(blkSize, blkSize, 1, noMeas);
for i = 1:1:noMeas
W(:, :, 1, i) = reshape(Phi(i, :), blkSize, blkSize);
end
net.layers{1}.weights(1) = {single(W)};
% im = double(imread('cameraman.tif'));
%% 2. Initial reconstruction layer with 1x1 Convolution
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(1, 1, noMeas, blkSize*blkSize,'single'), zeros(blkSize*blkSize,1,'single')}}, ...
'stride', 1, ...
'pad', 0, ...
'dilate',1, ...
'learningRate',lr11, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
W2 = zeros(1, 1, noMeas, blkSize*blkSize);
PhiInv = pinv(Phi);
for i = 1:1:noMeas
W2(:, :, i, :) = PhiInv(:, i);
end
net.layers{2}.weights(1) = {single(W2)};
%% 3. Reshape and concatenate to make the reconstructed image
net.layers{end+1} = struct('type', 'reshapeconcat');
%% 4. Reconstruction network - DnCNN
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,1,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'pad', 1, ...
'dilate',1, ...
'learningRate',lr11, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
for i = 1:1:noLayer - 2
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'learningRate',lr10, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'bnorm', ...
'weights', {{clipping(sqrt(2/(9*featureSize))*randn(featureSize,1,'single'),b_min), zeros(featureSize,1,'single'),meanvar}}, ...
'learningRate', [1 1 1], ...
'weightDecay', [0 0], ...
'opts', {{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
end
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,1,'single'), zeros(1,1,'single')}}, ...
'stride', 1, ...
'learningRate',lr11, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'loss') ; % make sure the new 'vl_nnloss.m' is in the same folder.
% Fill in default values
net = vl_simplenn_tidy(net);
function A = clipping(A,b)
A(A>=0&A<b) = b;
A(A<0&A>-b) = -b;
|
github
|
ngcthuong/CSNet-master
|
CSNet_init.m
|
.m
|
CSNet-master/TrainingCode/CSNet_v02/CSNet_init.m
| 3,501 |
utf_8
|
ea6f159161352a1a5852a8f290a8e6e3
|
function net = CSNet_init
global featureSize noLayer blkSize subRate isLearnMtx;
test = 0;
if test == 1
featureSize = 64;
noLayer = 7;
blkSize = 32;
subRate = 0.1;
end
noMeas = round(subRate * blkSize ^2);
%%% 17 layers
b_min = 0.025;
lr11 = [1 1];
lr10 = [1 0];
lr00 = [0 0];
weightDecay = [1 0];
meanvar = [zeros(featureSize,1,'single'), 0.01*ones(featureSize,1,'single')];
% Define network
net.layers = {} ;
%% 1. Sampling layer - for gray image
% Sampling network, with kernel size of blkSize x blkSize; do not use
% bias --> initialized as zero and learning rate = 0.
% Load sensing matrix of size blkSize x blkSize
trial = 1;
fileName = ['SensingMtxs\BlkSize' num2str(blkSize) '_trial' num2str(trial) '.mat' ];
if ~exist(fileName, 'file')
    Phi_Full = orth(rand(blkSize^2, blkSize^2));
    save(fileName, 'Phi_Full');
else
    load(fileName);
end
Phi = single(Phi_Full(1:noMeas, :)); % take the first noMeas rows whether Phi_Full was just generated or loaded
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(blkSize, blkSize, 1, noMeas,'single'), zeros(noMeas,1,'single')}}, ...
'stride', blkSize, ...
'pad', 0, ...
'dilate',1, ...
'learningRate', isLearnMtx, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
% net.layers{end+1} = struct('type', 'relu','leak',0) ; -- do not use relu
% assign the sampling matrix
W = zeros(blkSize, blkSize, 1, noMeas);
for i = 1:1:noMeas
W(:, :, 1, i) = reshape(Phi(i, :), blkSize, blkSize);
end
net.layers{1}.weights(1) = {single(W)};
% im = double(imread('cameraman.tif'));
%% 2. Initial reconstruction layer with 1x1 Convolution
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{zeros(1, 1, noMeas, blkSize*blkSize,'single'), zeros(blkSize*blkSize,1,'single')}}, ...
'stride', 1, ...
'pad', 0, ...
'dilate',1, ...
'learningRate',lr10, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
W2 = zeros(1, 1, noMeas, blkSize*blkSize);
PhiInv = pinv(Phi);
for i = 1:1:noMeas
W2(:, :, i, :) = PhiInv(:, i);
end
net.layers{2}.weights(1) = {single(W2)};
%% 3. Reshape and concatenate to make the reconstructed image
net.layers{end+1} = struct('type', 'reshapeconcat');
%% 4. Reconstruction network - DnCNN
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,1,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'pad', 1, ...
'dilate',1, ...
'learningRate',lr11, ...
'weightDecay',weightDecay, ...
'opts',{{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
for i = 1:1:noLayer - 2
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,featureSize,'single'), zeros(featureSize,1,'single')}}, ...
'stride', 1, ...
'learningRate', lr10, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'relu','leak',0) ;
end
net.layers{end+1} = struct('type', 'conv', ...
'weights', {{sqrt(2/(9*featureSize))*randn(3,3,featureSize,1,'single'), zeros(1,1,'single')}}, ...
'stride', 1, ...
'learningRate', lr11, ...
'dilate',1, ...
'weightDecay',weightDecay, ...
'pad', 1, 'opts', {{}}) ;
net.layers{end+1} = struct('type', 'loss') ; % make sure the new 'vl_nnloss.m' is in the same folder.
% Fill in default values
net = vl_simplenn_tidy(net);
function A = clipping(A,b)
A(A>=0&A<b) = b;
A(A<0&A>-b) = -b;
|
github
|
ngcthuong/CSNet-master
|
CSNet_train.m
|
.m
|
CSNet-master/TrainingCode/CSNet_v02/CSNet_train.m
| 12,946 |
utf_8
|
dbf0bbf2dc7f04221f1c4cec58d49787
|
function [net, state] = CSNet_train(net, varargin)
% The function automatically restarts after each training epoch by
% checkpointing.
%
% The function supports training on CPU or on one or more GPUs
% (specify the list of GPU IDs in the `gpus` option).
% Copyright (C) 2014-16 Andrea Vedaldi.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).
%%%-------------------------------------------------------------------------
%%% solvers: SGD(default) and Adam with(default)/without gradientClipping
%%%-------------------------------------------------------------------------
%%% solver: Adam
opts.solver = 'Adam';
opts.beta1 = 0.9;
opts.beta2 = 0.999;
opts.alpha = 0.01;
opts.epsilon = 1e-8;
%%% solver: SGD
% opts.solver = 'SGD';
opts.learningRate = 0.01;
opts.weightDecay = 0.001;
opts.momentum = 0.9 ;
%%% GradientClipping
opts.gradientClipping = false;
opts.theta = 0.005;
%%% specific parameter for Bnorm
opts.bnormLearningRate = 0;
%%%-------------------------------------------------------------------------
%%% setting for simplenn
%%%-------------------------------------------------------------------------
opts.conserveMemory = true;
opts.mode = 'normal';
opts.cudnn = true ;
opts.backPropDepth = +inf ;
opts.skipForward = false;
opts.numSubBatches = 1;
%%%-------------------------------------------------------------------------
%%% setting for model
%%%-------------------------------------------------------------------------
opts.batchSize = 128 ;
opts.gpus = [];
opts.numEpochs = 300 ;
opts.modelName = 'model';
opts.expDir = fullfile('data',opts.modelName) ;
opts.numberImdb = 1;
opts.imdbDir = opts.expDir;
%%%-------------------------------------------------------------------------
%%% update settings
%%%-------------------------------------------------------------------------
opts = vl_argparse(opts, varargin);
opts.numEpochs = numel(opts.learningRate);
if ~exist(opts.expDir, 'dir'), mkdir(opts.expDir) ; end
%%%-------------------------------------------------------------------------
%%% Initialization
%%%-------------------------------------------------------------------------
net = vl_simplenn_tidy(net); %%% fill in some eventually missing values
net.layers{end-1}.precious = 1;
vl_simplenn_display(net, 'batchSize', opts.batchSize) ;
state.getBatch = getBatch ;
%%%-------------------------------------------------------------------------
%%% Train and Test
%%%-------------------------------------------------------------------------
modelPath = @(ep) fullfile(opts.expDir, sprintf([opts.modelName,'-epoch-%d.mat'], ep));
start = findLastCheckpoint(opts.expDir,opts.modelName) ;
if start >= 1
fprintf('%s: resuming by loading epoch %d\n', mfilename, start) ;
load(modelPath(start), 'net') ;
net = vl_simplenn_tidy(net) ;
end
%%% load training data
opts.imdbPath = fullfile(opts.imdbDir);
imdb = load(opts.imdbPath) ;
opts.train = find(imdb.set==1);
for epoch = start+1 : opts.numEpochs
%%% Train for one epoch.
state.epoch = epoch ;
state.learningRate = opts.learningRate(min(epoch, numel(opts.learningRate)));
opts.thetaCurrent = opts.theta(min(epoch, numel(opts.theta)));
if numel(opts.gpus) == 1
net = vl_simplenn_move(net, 'gpu') ;
end
state.train = opts.train(randperm(numel(opts.train))) ; %%% shuffle
[net, state] = process_epoch(net, state, imdb, opts, 'train');
net.layers{end}.class =[];
net = vl_simplenn_move(net, 'cpu');
%%% save current model
save(modelPath(epoch), 'net')
end
%%%-------------------------------------------------------------------------
function [net, state] = process_epoch(net, state, imdb, opts, mode)
%%%-------------------------------------------------------------------------
if strcmp(mode,'train')
switch opts.solver
case 'SGD' %%% solver: SGD
for i = 1:numel(net.layers)
if isfield(net.layers{i}, 'weights')
for j = 1:numel(net.layers{i}.weights)
state.layers{i}.momentum{j} = 0;
end
end
end
case 'Adam' %%% solver: Adam
for i = 1:numel(net.layers)
if isfield(net.layers{i}, 'weights')
for j = 1:numel(net.layers{i}.weights)
state.layers{i}.t{j} = 0;
state.layers{i}.m{j} = 0;
state.layers{i}.v{j} = 0;
end
end
end
end
end
subset = state.(mode) ;
num = 0 ;
res = [];
for t=1:opts.batchSize:numel(subset)
for s=1:opts.numSubBatches
% get this image batch
batchStart = t + (s-1);
batchEnd = min(t+opts.batchSize-1, numel(subset)) ;
batch = subset(batchStart : opts.numSubBatches : batchEnd) ;
num = num + numel(batch) ;
if numel(batch) == 0, continue ; end
[inputs,labels] = state.getBatch(imdb, batch) ;
if numel(opts.gpus) == 1
inputs = gpuArray(inputs);
labels = gpuArray(labels);
end
if strcmp(mode, 'train')
dzdy = single(1);
evalMode = 'normal';%%% forward and backward (Gradients)
else
dzdy = [] ;
evalMode = 'test'; %%% forward only
end
net.layers{end}.class = labels ;
res = vl_simplenn(net, inputs, dzdy, res, ...
'accumulate', s ~= 1, ...
'mode', evalMode, ...
'conserveMemory', opts.conserveMemory, ...
'backPropDepth', opts.backPropDepth, ...
'cudnn', opts.cudnn) ;
end
if strcmp(mode, 'train')
[state, net] = params_updates(state, net, res, opts, opts.batchSize) ;
end
lossL2 = gather(res(end).x) ;
%%%--------add your code here------------------------
%%%--------------------------------------------------
fprintf('%s: epoch %02d dataset %02d: %3d/%3d:', mode, state.epoch, mod(state.epoch,opts.numberImdb), ...
fix((t-1)/opts.batchSize)+1, ceil(numel(subset)/opts.batchSize)) ;
fprintf('error: %f \n', lossL2) ;
end
%%%-------------------------------------------------------------------------
function [state, net] = params_updates(state, net, res, opts, batchSize)
%%%-------------------------------------------------------------------------
switch opts.solver
case 'SGD' %%% solver: SGD
for l=numel(net.layers):-1:1
for j=1:numel(res(l).dzdw)
if j == 3 && strcmp(net.layers{l}.type, 'bnorm')
%%% special case for learning bnorm moments
thisLR = net.layers{l}.learningRate(j) - opts.bnormLearningRate;
net.layers{l}.weights{j} = vl_taccum(...
1 - thisLR, ...
net.layers{l}.weights{j}, ...
thisLR / batchSize, ...
res(l).dzdw{j}) ;
else
thisDecay = opts.weightDecay * net.layers{l}.weightDecay(j);
thisLR = state.learningRate * net.layers{l}.learningRate(j);
if opts.gradientClipping
theta = opts.thetaCurrent/thisLR;
state.layers{l}.momentum{j} = opts.momentum * state.layers{l}.momentum{j} ...
- thisDecay * net.layers{l}.weights{j} ...
- (1 / batchSize) * gradientClipping(res(l).dzdw{j},theta) ;
net.layers{l}.weights{j} = net.layers{l}.weights{j} + ...
thisLR * state.layers{l}.momentum{j} ;
else
state.layers{l}.momentum{j} = opts.momentum * state.layers{l}.momentum{j} ...
- thisDecay * net.layers{l}.weights{j} ...
- (1 / batchSize) * res(l).dzdw{j} ;
net.layers{l}.weights{j} = net.layers{l}.weights{j} + ...
thisLR * state.layers{l}.momentum{j} ;
end
end
end
end
case 'Adam' %%% solver: Adam
for l=numel(net.layers):-1:1
for j=1:numel(res(l).dzdw)
if j == 3 && strcmp(net.layers{l}.type, 'bnorm')
%%% special case for learning bnorm moments
thisLR = net.layers{l}.learningRate(j) - opts.bnormLearningRate;
net.layers{l}.weights{j} = vl_taccum(...
1 - thisLR, ...
net.layers{l}.weights{j}, ...
thisLR / batchSize, ...
res(l).dzdw{j}) ;
else
thisLR = state.learningRate * net.layers{l}.learningRate(j);
state.layers{l}.t{j} = state.layers{l}.t{j} + 1;
t = state.layers{l}.t{j};
alpha = thisLR;
lr = alpha * sqrt(1 - opts.beta2^t) / (1 - opts.beta1^t);
state.layers{l}.m{j} = state.layers{l}.m{j} + (1 - opts.beta1) .* (res(l).dzdw{j} - state.layers{l}.m{j});
state.layers{l}.v{j} = state.layers{l}.v{j} + (1 - opts.beta2) .* (res(l).dzdw{j} .* res(l).dzdw{j} - state.layers{l}.v{j});
if opts.gradientClipping
theta = opts.thetaCurrent/lr;
net.layers{l}.weights{j} = net.layers{l}.weights{j} - lr * gradientClipping(state.layers{l}.m{j} ./ (sqrt(state.layers{l}.v{j}) + opts.epsilon),theta);
else
net.layers{l}.weights{j} = net.layers{l}.weights{j} - lr * state.layers{l}.m{j} ./ (sqrt(state.layers{l}.v{j}) + opts.epsilon);
end
% net.layers{l}.weights{j} = weightClipping(net.layers{l}.weights{j},2); % gradually clip the weights
end
end
end
end
%%%-------------------------------------------------------------------------
function epoch = findLastCheckpoint(modelDir,modelName)
%%%-------------------------------------------------------------------------
list = dir(fullfile(modelDir, [modelName,'-epoch-*.mat'])) ;
tokens = regexp({list.name}, [modelName,'-epoch-([\d]+).mat'], 'tokens') ;
epoch = cellfun(@(x) sscanf(x{1}{1}, '%d'), tokens) ;
epoch = max([epoch 0]) ;
%%%-------------------------------------------------------------------------
function A = gradientClipping(A, theta)
%%%-------------------------------------------------------------------------
A(A>theta) = theta;
A(A<-theta) = -theta;
%%%-------------------------------------------------------------------------
function A = weightClipping(A, theta)
%%%-------------------------------------------------------------------------
A(A>theta) = A(A>theta) -0.0005;
A(A<-theta) = A(A<-theta)+0.0005;
%%%-------------------------------------------------------------------------
function fn = getBatch
%%%-------------------------------------------------------------------------
fn = @(x,y) getSimpleNNBatch(x,y);
%%%-------------------------------------------------------------------------
function [inputs,labels] = getSimpleNNBatch(imdb, batch)
%%%-------------------------------------------------------------------------
inputs = imdb.inputs(:,:,:,batch);
rng('shuffle');
mode = randperm(8);
inputs = data_augmentation_CSNet(inputs, mode(1));
labels = inputs;
function image = data_augmentation_CSNet(image, mode)
if mode == 1
return;
end
if mode == 2 % flipped
image = flipud(image);
return;
end
if mode == 3 % flipped left-right
image = fliplr(image);
return;
end
if mode == 4 % flipped left-right & up-down
image = fliplr(image);
image = flipud(image);
return;
end
function image = data_augmentation(image, mode)
if mode == 1
return;
end
if mode == 2 % flipped
image = flipud(image);
return;
end
if mode == 3 % rotation 90
image = rot90(image,1);
return;
end
if mode == 4 % rotation 90 & flipped
image = rot90(image,1);
image = flipud(image);
return;
end
if mode == 5 % rotation 180
image = rot90(image,2);
return;
end
if mode == 6 % rotation 180 & flipped
image = rot90(image,2);
image = flipud(image);
return;
end
if mode == 7 % rotation 270
image = rot90(image,3);
return;
end
if mode == 8 % rotation 270 & flipped
image = rot90(image,3);
image = flipud(image);
return;
end
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
clusterDetections.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Sensor_Fusion_Using_Synthetic_Radar_and_Vision_Data/clusterDetections.m
| 2,042 |
utf_8
|
6d58bf60e4d9920de8dcf76f50fc1911
|
% clusterDetections
% This function merges multiple detections suspected to be of the same vehicle to a single detection.
% The function looks for detections that are closer than the size of a vehicle.
% Detections that fit this criterion are considered a cluster and are merged to a single detection
% at the centroid of the cluster.
% The measurement noises are modified to represent the possibility that each detection can be anywhere on the vehicle.
% Therefore, the noise should have the same size as the vehicle size.
% In addition, this function removes the third dimension of the measurement (the height) and
% reduces the measurement vector to [x;y;vx;vy].
function detectionClusters = clusterDetections(detections, vehicleSize)
N = numel(detections);
distances = zeros(N);
for i = 1:N
for j = i+1:N
if detections{i}.SensorIndex == detections{j}.SensorIndex
distances(i,j) = norm(detections{i}.Measurement(1:2) - detections{j}.Measurement(1:2));
else
distances(i,j) = inf;
end
end
end
leftToCheck = 1:N;
i = 0;
detectionClusters = cell(N,1);
while ~isempty(leftToCheck)
% Remove the detections that are in the same cluster as the one under
% consideration
underConsideration = leftToCheck(1);
clusterInds = (distances(underConsideration, leftToCheck) < vehicleSize);
detInds = leftToCheck(clusterInds);
clusterDets = [detections{detInds}];
clusterMeas = [clusterDets.Measurement];
meas = mean(clusterMeas, 2);
meas2D = [meas(1:2);meas(4:5)];
i = i + 1;
detectionClusters{i} = detections{detInds(1)};
detectionClusters{i}.Measurement = meas2D;
leftToCheck(clusterInds) = [];
end
detectionClusters(i+1:end) = [];
% Since the detections are now for clusters, modify the noise to represent
% that they are of the whole car
for i = 1:numel(detectionClusters)
measNoise(1:2,1:2) = vehicleSize^2 * eye(2);
measNoise(3:4,3:4) = eye(2) * 100 * vehicleSize^2;
detectionClusters{i}.MeasurementNoise = measNoise;
end
end
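% Illustrative usage sketch (the 4.7 m vehicle size is a hypothetical value):
%   detectionClusters = clusterDetections(detections, 4.7);
% Detections from the same sensor that lie within 4.7 m of each other are merged into
% a single detection at the cluster centroid, with a reduced [x;y;vx;vy] measurement.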
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
createDemoDisplay.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Sensor_Fusion_Using_Synthetic_Radar_and_Vision_Data/createDemoDisplay.m
| 2,798 |
utf_8
|
61b8d87959d894c7cb1233665f7ca089
|
% createDemoDisplay
% This function creates a three-panel display:
% Top-left corner of display: A top view that follows the ego vehicle.
% Bottom-left corner of display: A chase-camera view that follows the ego vehicle.
% Right-half of display: A bird's-eye plot display.
function BEP = createDemoDisplay(egoCar, sensors)
% Make a figure
hFigure = figure('Position', [0, 0, 1200, 640], 'Name', 'Sensor Fusion with Synthetic Data Example');
movegui(hFigure, [0 -1]); % Moves the figure to the left and a little down from the top
% Add a car plot that follows the ego vehicle from behind
hCarViewPanel = uipanel(hFigure, 'Position', [0 0 0.5 0.5], 'Title', 'Chase Camera View');
hCarPlot = axes(hCarViewPanel);
chasePlot(egoCar, 'Centerline', 'on', 'Parent', hCarPlot);
% Add a car plot that follows the ego vehicle from a top view
hTopViewPanel = uipanel(hFigure, 'Position', [0 0.5 0.5 0.5], 'Title', 'Top View');
hCarPlot = axes(hTopViewPanel);
chasePlot(egoCar, 'Centerline', 'on', 'Parent', hCarPlot, 'ViewHeight', 130, 'ViewLocation', [0 0], 'ViewPitch', 90);
% Add a panel for a bird's-eye plot
hBEVPanel = uipanel(hFigure, 'Position', [0.5 0 0.5 1], 'Title', 'Bird''s-Eye Plot');
% Create bird's-eye plot for the ego car and sensor coverage
hBEVPlot = axes(hBEVPanel);
frontBackLim = 60;
BEP = birdsEyePlot('Parent', hBEVPlot, 'Xlimits', [-frontBackLim frontBackLim], 'Ylimits', [-35 35]);
% Plot the coverage areas for radars
for i = 1:6
cap = coverageAreaPlotter(BEP,'FaceColor','red','EdgeColor','red');
plotCoverageArea(cap, sensors{i}.SensorLocation,...
sensors{i}.MaxRange, sensors{i}.Yaw, sensors{i}.FieldOfView(1));
end
% Plot the coverage areas for vision sensors
for i = 7:8
cap = coverageAreaPlotter(BEP,'FaceColor','blue','EdgeColor','blue');
plotCoverageArea(cap, sensors{i}.SensorLocation,...
sensors{i}.MaxRange, sensors{i}.Yaw, 45);
end
% Create a vision detection plotter and put it in a struct for future use
detectionPlotter(BEP, 'DisplayName','vision', 'MarkerEdgeColor','blue', 'Marker','^');
% Combine all radar detections into one entry and store it for later update
detectionPlotter(BEP, 'DisplayName','radar', 'MarkerEdgeColor','red');
% Add road borders to plot
laneBoundaryPlotter(BEP, 'DisplayName','road', 'Color', [.75 .75 0]);
% Add the tracks to the bird's-eye plot. Show last 10 track updates.
trackPlotter(BEP, 'DisplayName','track', 'HistoryDepth',10);
axis(BEP.Parent, 'equal');
xlim(BEP.Parent, [-frontBackLim frontBackLim]);
ylim(BEP.Parent, [-40 40]);
% Add an outline plotter for ground truth
outlinePlotter(BEP, 'Tag', 'Ground truth');
end
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
vehicleToImageROI.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/vehicleToImageROI.m
| 653 |
utf_8
|
9afa4c556cc9a400e9c90235a6468c54
|
%%
% *vehicleToImageROI* converts ROI in vehicle coordinates to image coordinates
% in bird's-eye-view image.
function imageROI = vehicleToImageROI(birdsEyeConfig, vehicleROI)
vehicleROI = double(vehicleROI);
loc2 = abs(vehicleToImage(birdsEyeConfig, [vehicleROI(2) vehicleROI(4)]));
loc1 = abs(vehicleToImage(birdsEyeConfig, [vehicleROI(1) vehicleROI(4)]));
loc4 = vehicleToImage(birdsEyeConfig, [vehicleROI(1) vehicleROI(4)]);
loc3 = vehicleToImage(birdsEyeConfig, [vehicleROI(1) vehicleROI(3)]);
[minRoiX, maxRoiX, minRoiY, maxRoiY] = deal(loc4(1), loc3(1), loc2(2), loc1(2));
imageROI = round([minRoiX, maxRoiX, minRoiY, maxRoiY]);
end
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
takeSnapshot.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/takeSnapshot.m
| 747 |
utf_8
|
1791ab533aacfe782877dc4956d747f3
|
%%
% *takeSnapshot* captures the output for the HTML publishing report.
function I = takeSnapshot(frame, sensor, sensorOut)
% Unpack the inputs
leftEgoBoundary = sensorOut.leftEgoBoundary;
rightEgoBoundary = sensorOut.rightEgoBoundary;
locations = sensorOut.vehicleLocations;
xVehiclePoints = sensorOut.xVehiclePoints;
bboxes = sensorOut.vehicleBoxes;
frameWithOverlays = insertLaneBoundary(frame, leftEgoBoundary, sensor, xVehiclePoints, 'Color','Red');
frameWithOverlays = insertLaneBoundary(frameWithOverlays, rightEgoBoundary, sensor, xVehiclePoints, 'Color','Green');
frameWithOverlays = insertVehicleDetections(frameWithOverlays, locations, bboxes);
I = frameWithOverlays;
end
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
validateBoundaryFcn.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/validateBoundaryFcn.m
| 364 |
utf_8
|
3899e015c1d26d20057a5c58c7b0d8d3
|
%%
% *validateBoundaryFcn* rejects some of the lane boundary curves
% computed using the RANSAC algorithm.
function isGood = validateBoundaryFcn(params)
if ~isempty(params)
a = params(1);
% Reject any curve with a small 'a' coefficient, which makes it highly
% curved.
isGood = abs(a) < 0.003; % a from ax^2+bx+c
else
isGood = false;
end
end
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
insertVehicleDetections.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/insertVehicleDetections.m
| 480 |
utf_8
|
3ab6baac14f95acdee8c09e73aa3706a
|
%%
% *insertVehicleDetections* inserts bounding boxes and displays
% [x,y] locations corresponding to returned vehicle detections.
function imgOut = insertVehicleDetections(imgIn, locations, bboxes)
imgOut = imgIn;
for i = 1:size(locations, 1)
location = locations(i, :);
bbox = bboxes(i, :);
label = sprintf('X=%0.2f, Y=%0.2f', location(1), location(2));
imgOut = insertObjectAnnotation(imgOut, ...
'rectangle', bbox, label, 'Color','g');
end
end
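% Illustrative usage sketch (hypothetical location and bounding box):
%   imgOut = insertVehicleDetections(frame, [12.5 -1.8], [420 300 90 70]);
%   % draws one green rectangle labelled 'X=12.50, Y=-1.80' on the frame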
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
computeVehicleLocations.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/computeVehicleLocations.m
| 1,058 |
utf_8
|
7df6f836cb4cca22035ffe4fad5968d8
|
%%
% *computeVehicleLocations* calculates the location of a vehicle
% in vehicle coordinates, given a bounding box returned by a detection
% algorithm in image coordinates. It returns the center location of the
% bottom of the bounding box in vehicle coordinates. Because a monocular
% camera sensor and a simple homography are used, only distances along the
% surface of the road can be computed. Computation of an arbitrary location
% in 3-D space requires use of a stereo camera or another sensor capable of
% triangulation.
function locations = computeVehicleLocations(bboxes, sensor)
locations = zeros(size(bboxes,1),2);
for i = 1:size(bboxes, 1)
bbox = bboxes(i, :);
% Get [x,y] location of the center of the lower portion of the
% detection bounding box in meters. bbox is [x, y, width, height] in
% image coordinates, where [x,y] represents upper-left corner.
yBottom = bbox(2) + bbox(4) - 1;
xCenter = bbox(1) + (bbox(3)-1)/2; % approximate center
locations(i,:) = imageToVehicle(sensor, [xCenter, yBottom]);
end
end
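% Illustrative usage sketch (hypothetical bounding box; 'sensor' is assumed to be a
% monoCamera configuration as in the rest of this example):
%   bboxes = [520 380 80 60];   % [x y width height] in image pixels
%   locations = computeVehicleLocations(bboxes, sensor);
%   % locations(1,:) is the [x y] road-surface position (in meters) of the
%   % bottom-center of the box in vehicle coordinates.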
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
classifyLaneTypes.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/classifyLaneTypes.m
| 979 |
utf_8
|
4fa6c4e6726da000a539c83cb0589b84
|
%%
% *classifyLaneTypes* determines lane marker types as |solid|, |dashed|, etc.
function boundaries = classifyLaneTypes(boundaries, boundaryPoints)
for bInd = 1 : numel(boundaries)
vehiclePoints = boundaryPoints{bInd};
% Sort by x
vehiclePoints = sortrows(vehiclePoints, 1);
xVehicle = vehiclePoints(:,1);
xVehicleUnique = unique(xVehicle);
% Dashed vs solid
xdiff = diff(xVehicleUnique);
% Sufficiently large threshold to remove spaces between points of a
% solid line, but not large enough to remove spaces between dashes
xdifft = mean(xdiff) + 3*std(xdiff);
largeGaps = xdiff(xdiff > xdifft);
% Safe default
boundaries(bInd).BoundaryType= LaneBoundaryType.Solid;
if largeGaps>2
% Ideally, these gaps should be consistent, but you cannot rely
% on that unless you know that the ROI extent includes at least 3 dashes.
boundaries(bInd).BoundaryType = LaneBoundaryType.Dashed;
end
end
end
|
github
|
cedricxie/MATLAB_Automated_Driving_Box-master
|
visualizeSensorResults.m
|
.m
|
MATLAB_Automated_Driving_Box-master/Visual_Perception_Using_Monocular_Camera/visualizeSensorResults.m
| 2,379 |
utf_8
|
469fbc729ba3949f70d12fce4d77e690
|
%% visualizeSensorResults displays core information and intermediate results from the monocular camera sensor simulation.
function isPlayerOpen = visualizeSensorResults(frame, sensor, sensorOut,...
intOut, closePlayers)
% Unpack the main inputs
leftEgoBoundary = sensorOut.leftEgoBoundary;
rightEgoBoundary = sensorOut.rightEgoBoundary;
locations = sensorOut.vehicleLocations;
xVehiclePoints = sensorOut.xVehiclePoints;
bboxes = sensorOut.vehicleBoxes;
% Unpack additional intermediate data
birdsEyeViewImage = intOut.birdsEyeImage;
birdsEyeConfig = intOut.birdsEyeConfig;
vehicleROI = intOut.vehicleROI;
birdsEyeViewBW = intOut.birdsEyeBW;
% Visualize left and right ego-lane boundaries in bird's-eye view
birdsEyeWithOverlays = insertLaneBoundary(birdsEyeViewImage, leftEgoBoundary , birdsEyeConfig, xVehiclePoints, 'Color','Red');
birdsEyeWithOverlays = insertLaneBoundary(birdsEyeWithOverlays, rightEgoBoundary, birdsEyeConfig, xVehiclePoints, 'Color','Green');
% Visualize ego-lane boundaries in camera view
frameWithOverlays = insertLaneBoundary(frame, leftEgoBoundary, sensor, xVehiclePoints, 'Color','Red');
frameWithOverlays = insertLaneBoundary(frameWithOverlays, rightEgoBoundary, sensor, xVehiclePoints, 'Color','Green');
frameWithOverlays = insertVehicleDetections(frameWithOverlays, locations, bboxes);
imageROI = vehicleToImageROI(birdsEyeConfig, vehicleROI);
ROI = [imageROI(1) imageROI(3) imageROI(2)-imageROI(1) imageROI(4)-imageROI(3)];
% Highlight candidate lane points that include outliers
birdsEyeViewImage = insertShape(birdsEyeViewImage, 'rectangle', ROI); % show detection ROI
birdsEyeViewImage = imoverlay(birdsEyeViewImage, birdsEyeViewBW, 'blue');
% Display the results
frames = {frameWithOverlays, birdsEyeViewImage, birdsEyeWithOverlays};
persistent players;
if isempty(players)
frameNames = {'Lane marker and vehicle detections', 'Raw segmentation', 'Lane marker detections'};
players = helperVideoPlayerSet(frames, frameNames);
end
update(players, frames);
% Terminate the loop when the first player is closed
isPlayerOpen = isOpen(players, 1);
if (~isPlayerOpen || closePlayers) % close down the other players
clear players;
end
end
|
github
|
tjdodwell/matLam-master
|
makeMesh.m
|
.m
|
matLam-master/include/preProcessing/makeMesh.m
| 4,803 |
utf_8
|
6896a25bf75578e555cda0b40938ee2c
|
function msh = makeMesh(model)
% -----------------------------------------------------------------------
% This code is released under GNU LESSER GENERAL PUBLIC LICENSE v3 (LGPL)
%
% Details are provided in license.txt file in the main directory
%
% 1/8/14 - Dr T. J. Dodwell - University of Bath - [email protected]
% -----------------------------------------------------------------------
% makemsh.m - Written (TJD - 3/6/2014)
%
% Creates Coarse Quadrilateral msh on [0,Lx] by [0,Ly] and refines uniformly to desired msh size
%
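% Illustrative input (a sketch; the field values are assumptions, not from the
% original project):
%   model.Lx = 1.0; model.Ly = 0.5;        % plate dimensions
%   model.meshRefinement = 3;              % number of uniform refinements
%   model.type = 'mindlin';                % or 'zigzag'
%   model.integrationOption = 'complete';  % or 'reduced'
%   msh = makeMesh(model);
%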
% --------------------------------
% (1) Set up Coarse Rectangle
% --------------------------------
msh.coords = [0 0;
model.Lx 0;
model.Lx model.Ly;
0 model.Ly];
msh.elements = 1:4;
%
nodesOfRefinement = zeros(9,1);
edgeTable = zeros(0,3); % Empty table with three columns: [n1, n2, midpoint node]
for ii = 1:model.meshRefinement % For each refinement
visitedEdges = 0;
nelem = 0;
inode = 0;
newcoords = [];
nodesPreviousRefinement = size(msh.coords(:,1),1);
for ie = 1:size(msh.elements,1); % Each Element
nodesOfRefinement(1:4) = msh.elements(ie,:);
% First Add Mid Point
inode=inode+1;
newcoords(inode,:) = 0.5*(msh.coords(msh.elements(ie,1),:) + msh.coords(msh.elements(ie,3),:));
nodesOfRefinement(5)=inode + nodesPreviousRefinement;
for edge = 1:4 % For Each Edge
n1 = msh.elements(ie,edge); n2 = msh.elements(ie,mod(edge,4)+1);
% Has Edge been visited before - if so return id = 1 and the node
[id,oldNode] = edgeVisited(edgeTable,n1,n2);
if id == 0 % If new edge add midpoint as new node
inode=inode+1;
nodesOfRefinement(5+edge) = inode + nodesPreviousRefinement;
newcoords(inode,:) = 0.5*(msh.coords(n1,:)+msh.coords(n2,:));
edgeTable(visitedEdges+1,1:2) = [n1,n2];
edgeTable(visitedEdges+1,3) = inode + nodesPreviousRefinement;
visitedEdges=visitedEdges+1;
else
nodesOfRefinement(5+edge) = oldNode;
end
end % For each edge
newelements(nelem+1,:)=nodesOfRefinement([1,6,5,9]);
newelements(nelem+2,:)=nodesOfRefinement([6,2,7,5]);
newelements(nelem+3,:)=nodesOfRefinement([5,7,3,8]);
newelements(nelem+4,:)=nodesOfRefinement([9,5,8,4]);
nelem=nelem+4;
end
msh.elements = newelements;
msh.coords = [msh.coords;newcoords];
end % for each refinement
% Mesh Constructed
msh.nnod = size(msh.coords,1);
msh.nel = size(msh.elements,1);
msh.ndim = 2;
switch lower(model.type)
case 'mindlin'
msh.dof = 5;
case 'zigzag'
msh.dof = 7;
end
msh.nnodel = 4; % Nodes per Element
msh.nedof = msh.nnodel*msh.dof;
msh.tdof = msh.dof*msh.nnod;
% Setup local to global number of general element formulation with msh.dof degrees of freedom per node.
msh.e2g = zeros(msh.nel,msh.nedof);
for ie = 1 : msh.nel
ne = msh.elements(ie,:);
for j = 1 : msh.dof
msh.e2g(ie,(j-1)*length(ne) + 1: j * length(ne)) = (j-1) * msh.nnod + ne;
end
end
[msh.nip,msh.IP_X, msh.IP_w] = ip_quad(model.integrationOption);
[msh.N, msh.dNdu] = shapeFunctionQ4(msh.IP_X);
end
function [id,node] = edgeVisited(edgeTable,n1,n2)
id = 0; node = 0;
numEdgesVisited = size(edgeTable,1);
temp = find(edgeTable(:,1) == n1);
for i = 1:length(temp)
if edgeTable(temp(i),2) == n2
id = 1;
node = edgeTable(temp(i),3);
end
end
if id == 0
temp = find(edgeTable(:,2) == n1);
for i = 1:length(temp)
if edgeTable(temp(i),1) == n2
id = 1;
node = edgeTable(temp(i),3);
end
end
end
end
function [N, dNdu] = shapeFunctionQ4(IP_X,nnodel)
% TJD - June 2014
nip = size(IP_X,1);
N = cell(nip,1);
dNdu = cell(nip,1);
for i = 1:nip
xi = IP_X(i,1); eta = IP_X(i,2);
shp=0.25*[ (1-xi)*(1-eta);
(1+xi)*(1-eta);
(1+xi)*(1+eta);
(1-xi)*(1+eta)];
deriv=0.25*[-(1-eta), -(1-xi);
1-eta, -(1+xi);
1+eta, 1+xi;
-(1+eta), 1-xi];
N{i} = shp;
dNdu{i} = deriv';
end
end % end function shapeFunctionQ4
function [nip,IP_X,IP_W] = ip_quad(option)
% TJD - June 2014
% Gauss quadrature for Q4 elements
% option 'complete' (2x2)
% option 'reduced' (1x1)
% nip: Number of Integration Points
% ipx: Gauss point locations
% ipw: Gauss point weights
switch option
case 'complete'
nip = 4;
IP_X=...
[ -0.577350269189626 -0.577350269189626;
0.577350269189626 -0.577350269189626;
0.577350269189626 0.577350269189626;
-0.577350269189626 0.577350269189626];
IP_W=[ 1;1;1;1];
case 'reduced'
nip = 1;
IP_X=[0 0];
IP_W=[4];
end % end of switch 'option'
end % end of function ip_quad
|
github
|
tjdodwell/matLam-master
|
makeABDH2.m
|
.m
|
matLam-master/include/FEM/makeABDH2.m
| 7,495 |
utf_8
|
cfbbd48beb5d01237bbfee4326aab1c5
|
function mat = makeABDH2(model)
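% Illustrative model fields assumed by this routine (a sketch; the values are
% assumptions, not taken from the original project):
%   model.type   = 'mindlin';                          % or 'zigzag'
%   model.numPly = 8;
%   model.t      = 0.125e-3 * ones(1, 8);              % ply thicknesses
%   model.ss     = deg2rad([0 45 -45 90 90 -45 45 0]); % stacking sequence (radians)
%   model.material = struct('E1',135e9,'E2',10e9,'nu12',0.3,'G12',5e9, ...
%                           'G23',3.5e9,'G13',5e9,'SF',5/6);
%   mat = makeABDH2(model);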
switch lower(model.type)
case 'mindlin'
% upper and lower coordinates
z = zeros(1,model.numPly+1);
z(1) = 0;
for i = 2:model.numPly+1
z(i) = z(i-1) + model.t(i-1);
end
z = z - mean(z);
model.material.nu21=model.material.nu12*(model.material.E2/model.material.E1);
factor=1-model.material.nu12*model.material.nu21;
Q = zeros(5);
Q(1,1)=model.material.E1/factor;
Q(1,2)=model.material.nu12*model.material.E2/factor;
Q(2,1)=Q(1,2);
Q(2,2)=model.material.E2/factor;
Q(3,3)=model.material.G12;
Q(4,4)=model.material.SF*model.material.G23;
Q(5,5)=model.material.SF*model.material.G13;
%______________________________________________
A = zeros(5); B = zeros(5); D = zeros(5); H = zeros(5); T = zeros(5);
for k=1:model.numPly
phi = model.ss(k);
% Transformation Matrix
c = cos(phi); s = sin(phi);
T(1,1) = c^2; T(1,2) = s^2; T(1,3) = 2*c*s;
T(2,1) = s^2; T(2,2) = c^2; T(2,3) = -2*c*s;
T(3,1) = -c*s; T(3,2) = c*s; T(3,3) = c^2-s^2;
T(4,4) = c; T(4,5) = s;
T(5,4) = -s; T(5,5) = c;
% [Q] in structural axes
invT = inv(T);
Qbar= invT*Q*(invT');
A= A + Qbar*(z(k+1)-z(k));
B= B + Qbar*(z(k+1)^2-z(k)^2)/2;
D= D + Qbar*(z(k+1)^3-z(k)^3)/3;
H= H + Qbar*(z(k+1)-z(k));
end
A = A(1:3,1:3); B = B(1:3,1:3); D = D(1:3,1:3);
H = H(4:5,4:5);
mat.A = A; mat.B = B; mat.D = D; mat.H = H;
case 'zigzag'
% Compute the interfaces
% upper and lower coordinates
z = zeros(1,model.numPly+1);
z(1) = 0;
for i = 2:model.numPly+1
z(i) = z(i-1) + model.t(i-1);
end
z = z - mean(z);
% Compute Q - composite matrix in local axis
model.material.nu21=model.material.nu12*(model.material.E2/model.material.E1);
factor=1-model.material.nu12*model.material.nu21;
Q = zeros(5);
Q(1,1)=model.material.E1/factor;
Q(1,2)=model.material.nu12*model.material.E2/factor;
Q(2,1)=Q(1,2);
Q(2,2)=model.material.E2/factor;
Q(3,3)=model.material.G12;
G23 = model.material.G23;
G13 = model.material.G13;
G13i = model.material.G13i;
G23i = model.material.G23i;
E_int = model.material.E2;
nu12_int = model.material.nu12;
% Compute Zig-Zag Matrices
[G1, G2] = computeGs(model,G13,G23,G13i,G23i); % Compute laminate shear moduli
A = zeros(3); B = zeros(3,7); D = zeros(7); H = zeros(4);
for k = 1 : model.numPly
phi = model.ss(k);
if (phi > 0) % This is a composite ply
% Transformation Matrix
c = cos(phi); s = sin(phi);
T = zeros(3);
T(1,1) = c^2; T(1,2) = s^2; T(1,3) = 2*c*s;
T(2,1) = s^2; T(2,2) = c^2; T(2,3) = -2*c*s;
T(3,1) = -c*s; T(3,2) = c*s; T(3,3) = c^2-s^2;
% Rotate [Q] to structural axes
invT = inv(T);
Qk = invT * Q * (invT');
% Shear Matrix
Hk = [cos(phi)^2 * G23 + sin(phi)^2 * G13, sin(phi) * cos(phi) * (G13 - G23);
sin(phi) * cos(phi) * (G13 - G23), (cos(phi) ^2) * G13 + (sin(phi) ^ 2) * G23];
else % This is an interface layer
% In this case no rotation is required (the interface layer is isotropic).
factor = 1.0 - nu12_int * nu12_int;
Qk = [E_int/factor, nu12_int * E_int/factor, 0.0; nu12_int * E_int/factor, E_int/factor, 0.0; 0.0, 0.0, E_int / (2.0 * (1.0 + nu12_int))];
Hk = [G23i,0.0;0.0,G13i];
end
% Compute A matrix - constant within each layer
A = A + Qk * model.t(k);
% Compute B matrix - linear in each layer - compute exactly with trapezoidal rule
Bk0 = calB_phi(z(k),k,G13,G23,G13i,G23i,G1,G2,model);
Bk1 = calB_phi(z(k+1),k,G13,G23,G13i,G23i,G1,G2,model);
B = B + 0.5 * model.t(k) * Qk * (Bk1 + Bk0);
% Compute D matrix - since quadratic in each layer - compute exactly with simpsons rule
Bkhalf = calB_phi(0.5*(z(k)+z(k+1)),k,G13,G23,G13i,G23i,G1,G2,model);
D = D + (1.0 / 6.0) * model.t(k) * (Bk0' * Qk * Bk0 + 4.0 * Bkhalf' * Qk * Bkhalf + Bk1' * Qk * Bk1);
% Compute G matrix
[beta1, beta2] = computeBetas(k,G1,G2,G13,G23,G13i,G23i,model);
Bb = [1.0, beta2, 0.0, 0.0; 0.0, 0.0, 1.0, beta1];
H = H + model.t(k) * (Bb' * Hk * Bb) * model.t(k);
end % end for each ply
mat.A = A; mat.B = B; mat.D = D; mat.H = H;
end
end
% model.t - contains layer thickness
% model.ss - contains stacking sequence
% Need a function which calculates Bphi
function [G1, G2] = computeGs(model,G13,G23,G13i,G23i)
G1 = 0.0; G2 = 0.0;
for k = 1 : 2 : model.numPly
phi = model.ss(k);
Q11k = (cos(phi) ^ 2) * G13 + (sin(phi) ^ 2) * G23;
Q22k = (cos(phi) ^ 2) * G23 + (sin(phi) ^ 2) * G13;
G1 = G1 + model.t(k) / Q11k;
G2 = G2 + model.t(k) / Q22k;
end
for k = 2 : 2 : model.numPly % For the interfaces
G1 = G1 + model.t(k) / G13i;
G2 = G2 + model.t(k) / G23i;
end
G1 = G1 / sum(model.t); G2 = G2 / sum(model.t);
G1 = 1 / G1; G2 = 1 / G2;
end
function [beta1, beta2] = computeBetas(k,G1,G2,G13,G23,G13i,G23i,model)
if (model.ss(k) < 0.0) % It is an interface
beta1 = G1 / G13i - 1.0;
beta2 = G2 / G23i - 1.0;
else
phi = model.ss(k);
Q11k = (cos(phi) ^ 2) * G13 + (sin(phi) ^ 2) * G23;
Q22k = (cos(phi) ^ 2) * G23 + (sin(phi) ^ 2) * G13;
beta1 = G1 / Q11k - 1.0;
beta2 = G2 / Q22k - 1.0;
end
end
function B = calB_phi(z,k,G13,G23,G13i,G23i,G1,G2,model)
B = zeros(3,7);
[phi1, phi2] = calPhi(z,k,G13,G23,G13i,G23i,G1,G2,model);
B(1,1) = z; B(1,2) = phi1;
B(2,3) = z; B(2,4) = phi2;
B(3,5) = z; B(3,6) = phi1; B(3,7) = phi2;
end
function [phi1, phi2] = calPhi(z,k,G13,G23,G13i,G23i,G1,G2,model)
% Note that this is a linear function in z
h = 0.5 * sum(model.t);
phi = model.ss(k);
Q11k = (cos(phi) ^ 2) * G13 + (sin(phi) ^ 2) * G23;
Q22k = (cos(phi) ^ 2) * G23 + (sin(phi) ^ 2) * G13;
phi1 = (z + h) * (G1 / Q11k - 1.0);
phi2 = (z + h) * (G2 / Q22k - 1.0);
if (k > 0)
for i = 2 : model.numPly
phi = model.ss(i);
if(phi < 0.0)
Q11i = G13i;
Q22i = G23i;
else
Q11i = (cos(phi) ^ 2 * G13) + (sin(phi) ^ 2) * G23;
Q22i = (cos(phi) ^ 2 * G23) + (sin(phi) ^ 2) * G13;
end
phi1 = phi1 + model.t(i-1) * (G1 / Q11i - G1 / Q11k);
phi2 = phi2 + model.t(i-1) * (G2 / Q22i - G2 / Q22k);
end
end
end
|
github
|
tjdodwell/matLam-master
|
elementShapeFunctions.m
|
.m
|
matLam-master/include/FEM/elementShapeFunctions.m
| 1,612 |
utf_8
|
30bd5c1f46692a39f093d1514e018e26
|
function [Ni,dNdX,detJ] = elementShapeFunctions(msh,ie,ip,integration_option)
switch lower(integration_option);
case 'full'
[IP_X,IP_W] = ip_quad;
[N, dNdu] = shapeFunctionQ4(IP_X);
Ni = N{ip}; dNdui = dNdu{ip};
case 'reduced'
Ni = msh.N{1}; dNdui = msh.dNdu{1};
end
J = msh.coords(msh.elements(ie,:),:)'*dNdui';
detJ = det(J);
dNdX = dNdui'*inv(J);
end
function [N, dNdu] = shapeFunctionQ4(IP_X)
% TJD - June 2014
nip = 4;
N = cell(nip,1);
dNdu = cell(nip,1);
for i = 1:nip
xi = IP_X(i,1); eta = IP_X(i,2);
shp=0.25*[ (1-xi)*(1-eta);
(1+xi)*(1-eta);
(1+xi)*(1+eta);
(1-xi)*(1+eta)];
deriv=0.25*[-(1-eta), -(1-xi);
1-eta, -(1+xi);
1+eta, 1+xi;
-(1+eta), 1-xi];
N{i} = shp;
dNdu{i} = deriv';
end
end % end function shapeFunctionQ4
function [IP_X,IP_W] = ip_quad
% TJD - June 2014
% Gauss quadrature for Q4 elements
% option 'complete' (2x2)
% option 'reduced' (1x1)
% nip: Number of Integration Points
% ipx: Gauss point locations
% ipw: Gauss point weights
IP_X=...
[ -0.577350269189626 -0.577350269189626;
0.577350269189626 -0.577350269189626;
0.577350269189626 0.577350269189626;
-0.577350269189626 0.577350269189626];
IP_W=[ 1;1;1;1];
end % end of function ip_quad
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
matchExposures.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_1/project_1/matchExposures.m
| 2,853 |
utf_8
|
ae91ed3665fbf30805c02a26aedd688d
|
function [matchedImage] = matchExposures(images, transforms, performLoop)
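% Illustrative inputs (a sketch; names and shapes inferred from the code below):
%   images      - H x W x 3 x N uint8 image stack
%   transforms  - 3 x 3 x N projective transforms mapping image i into image i-1
%   performLoop - true when the sequence wraps around, so the exposure gains are
%                 constrained to multiply to one
%   matched = matchExposures(images, transforms, true);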
numberImages = size(images, 4);
gammaList = ones(numberImages, 1);
for i = 2 : numberImages
gammaList(i) = matchImagePair(images(:, :, :, i - 1), images(:, :, :, i), transforms(:, :, i));
end
if performLoop
logGammaList = log(gammaList);
logGammaList(1) = [];
A = eye(numberImages - 2);
A = [A; -ones(1, numberImages - 2)];
updatedLogGammaList = A \ logGammaList;
updatedLogGammaList = [0; updatedLogGammaList];
finalGammas = exp(updatedLogGammaList);
accGammaList = ones(numberImages, 1);
for i = 2 : numberImages - 1
accGammaList(i) = accGammaList(i - 1) * finalGammas(i);
end
else
accGammaList = ones(numberImages, 1);
for i = 2 : numberImages
accGammaList(i) = accGammaList(i - 1) * gammaList(i);
end
end
matchedImage = zeros(size(images), 'uint8');
for i = 1 : numberImages
matchedImage(:, :, :, i) = gammaCorrection(images(:, :, :, i), accGammaList(i));
end
end
%% Match pairs of images
function [gammaVal] = matchImagePair(image1, image2, transformVal)
numberIterations = 1000;
alphaVal = 1;
sampleRatioVal = 0.01;
outlierThresholdVal = 1.0;
height = size(image1, 1);
width = size(image1, 2);
labImage1 = rgb2lab(image1);
labImage2 = rgb2lab(image2);
k = 1;
numberPixels = numel(image1);
numberSamples = round(numberPixels * sampleRatioVal);
samples = zeros(numberSamples, 2);
while true
pixel2 = [randi([1 height]); randi([1 width]); 1];
pixel1 = transformVal * pixel2;
pixel1 = pixel1 ./ pixel1(3);
if pixel1(1) >= 1 && pixel1(1) < height && pixel1(2) >= 1 && pixel1(2) < width
i = floor(pixel1(2));
a = pixel1(2) - i;
j = floor(pixel1(1));
b = pixel1(1) - j;
sample1 = (1 - a) * (1 - b) * labImage1(j, i, 1) + a * (1 - b) * labImage1(j, i + 1, 1) + a * b * labImage1(j + 1, i + 1, 1) + (1 - a) * b * labImage1(j + 1, i, 1);
sample2 = labImage2(pixel2(1), pixel2(2), 1);
if sample1 > outlierThresholdVal && sample2 > outlierThresholdVal
samples(k, 1) = sample1 / 100;
samples(k, 2) = sample2 / 100;
k = k + 1;
if k > numberSamples
break;
end
end
end
end
gammaVal = 1;
for i = 1 : numberIterations
gammaVal = gammaVal - alphaVal * sum((samples(:, 2) .^ gammaVal - samples(:, 1)) .* log(samples(:, 2)) .* (samples(:, 2) .^ gammaVal)) / numberSamples;
end
end
%% Perform Gamma Correction
function [gammaImage] = gammaCorrection(image, gammaVal)
labImage = rgb2lab(image);
labImage(:, :, 1) = (labImage(:, :, 1) / 100) .^ gammaVal * 100;
gammaImage = lab2rgb(labImage, 'OutputType', 'uint8');
end
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
CannyEdgeDetection.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_1/project_1/Functions/CannyEdgeDetection.m
| 3,412 |
utf_8
|
6f73fb6ab7f1dff7fd8e67c55ce58382
|
imageMatrix1 = imread('lineDetect1.bmp');
imageMatrix2 = imread('lineDetect2.bmp');
imageMatrix3 = imread('lineDetect3.bmp');
outputImage1 = edgeDetection(imageMatrix1);
outputImage2 = edgeDetection(imageMatrix2);
outputImage3 = edgeDetection(imageMatrix3);
imwrite(outputImage1, 'Outputs/cannyedgedetection1.png', 'png');
imwrite(outputImage2, 'Outputs/cannyedgedetection2.png', 'png');
imwrite(outputImage3, 'Outputs/cannyedgedetection3.png', 'png');
figure(1);
subplot(3,2,1); imagesc(imageMatrix1);
subplot(3,2,2); imagesc(outputImage1);
subplot(3,2,3); imagesc(imageMatrix2);
subplot(3,2,4); imagesc(outputImage2);
subplot(3,2,5); imagesc(imageMatrix3);
subplot(3,2,6); imagesc(outputImage3);
function outputImage = edgeDetection(imageMatrix)
LINE_SET = {};
EDGE_SET = {};
ITERS = 0;
TOTAL_NO_ITERS = 10000;
MAX_PAIR_DISTANCE = 100;
MIN_POINTLINE_DISTANCE = 2;
MIN_LINE_PIXEL_NUM = 50;
cannyEdges = edge(rgb2gray(imageMatrix),'Canny', 0.1);
sizeX = size(imageMatrix,1);
sizeY = size(imageMatrix,2);
for i = 1:1:sizeX
for j = 1:1:sizeY
if(cannyEdges(i,j) == 1)
EDGE_SET{end + 1} = [i j];
end
end
end
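% The loop below is a simple RANSAC-style line extractor: repeatedly pick two
% nearby edge pixels, form the line through them, gather every edge pixel within
% MIN_POINTLINE_DISTANCE of that line, and keep the set as a detected line
% whenever it contains at least MIN_LINE_PIXEL_NUM pixels.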
while(ITERS ~= TOTAL_NO_ITERS)
ITERS = ITERS + 1;
%disp(ITERS)
edgeSize = size(EDGE_SET);
randP = randi([1 edgeSize(2)]);
pPoint = EDGE_SET(randP);
pPointArray = pPoint{1,1};
px = pPointArray(1);
py = pPointArray(2);
dist = MAX_PAIR_DISTANCE + 1;
randQ = 0;
while(dist > MAX_PAIR_DISTANCE)
randQ = randi([1 edgeSize(2)]);
qPoint = EDGE_SET(randQ);
qPointArray = qPoint{1,1};
cqx = qPointArray(1);
cqy = qPointArray(2);
dist = pdist([px, py; cqx, cqy], 'euclidean');
end
qPoint = EDGE_SET(randQ);
qPointArray = qPoint{1,1};
INPUT_SET = {};
i = 1;
edgeS = edgeSize(2);
while(i <= edgeS)
point = EDGE_SET(i);
pointArray = point{1,1};
x1 = pPointArray(1);
y1 = pPointArray(2);
x2 = qPointArray(1);
y2 = qPointArray(2);
x0 = pointArray(1);
y0 = pointArray(2);
% Perpendicular distance from (x0,y0) to the line through p and q
numerator = abs((y2 - y1)*x0 - (x2 - x1)*y0 + (x2*y1) - (y2*x1));
denominator = sqrt((y2 - y1)^2 + (x2 - x1)^2);
dist = numerator / denominator;
if(dist <= MIN_POINTLINE_DISTANCE)
INPUT_SET{end + 1} = pointArray;
EDGE_SET(i) = [];
edgeS = edgeS - 1; % the set shrank, so keep the index in place
else
i = i + 1;
end
end
inputSize = size(INPUT_SET);
if(inputSize(2) >= MIN_LINE_PIXEL_NUM)
LINE_SET{end + 1} = INPUT_SET;
end
end
lineSize = size(LINE_SET);
newImage = uint8(zeros(sizeX, sizeY,3));
for(w = 1:1:lineSize(2))
randR = randi([50 200]);
randG = randi([50 200]);
randB = randi([50 200]);
currLine = LINE_SET(w);
currentL = currLine{1,1};
currLineSize = size(currentL);
for(q = 1:1:currLineSize(2))
pPoint = currLine(1);
pointArray = pPoint{1,1};
finalArray = pointArray(q);
points = finalArray{1, 1};
x = points(1);
y = points(2);
newImage(x, y, 1) = randR;
newImage(x, y, 2) = randG;
newImage(x, y, 3) = randB;
end
end
outputImage = newImage;
end
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
StereoMatching.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_1/project_1/Functions/StereoMatching.m
| 2,242 |
utf_8
|
5ed51e493dc2bb8c52487d16eafb2b2c
|
left1 = imread('left1.png');
left2 = imread('left2.png');
left3 = imread('left3.bmp');
right1 = imread('right1.png');
right2 = imread('right2.png');
right3 = imread('right3.bmp');
outputImage1 = stereoMatch(left1, right1);
outputImage2 = stereoMatch(left2, right2);
outputImage3 = stereoMatch(left3, right3);
imwrite(outputImage1, 'Outputs/stereomatching1.png', 'png');
imwrite(outputImage2, 'Outputs/stereomatching2.png', 'png');
imwrite(outputImage3, 'Outputs/stereomatching3.png', 'png');
colormap(gray);
% image(outputImage1);
% figure
% colormap(gray);
% image(outputImage2);
% figure
% colormap(gray);
% image(outputImage3);
figure(3);
subplot(3,3,1); imagesc(left1);
subplot(3,3,2); imagesc(right1);
subplot(3,3,3); imagesc(outputImage1);
subplot(3,3,4); imagesc(left2);
subplot(3,3,5); imagesc(right2);
subplot(3,3,6); imagesc(outputImage2);
subplot(3,3,7); imagesc(left3);
subplot(3,3,8); imagesc(right3);
subplot(3,3,9); imagesc(outputImage3);
function outputImage = stereoMatch(left, right)
DISPARITY_RANGE = 50;
WIN_SIZE = 5;
EXTEND = (WIN_SIZE - 1) / 2;
Nx = size(left, 1);
Ny = size(left, 2);
ileft = double(rgb2gray(left));
iright = double(rgb2gray(right));
colormap(gray);
DISPARITY = uint8(zeros(Nx, Ny, 3));
for y = 1:1:Nx
for x = 1:1:Ny
bestDisparity = 0;
bestNCC = 0; % Lowest NCC Score
for myDisp = 1:1:DISPARITY_RANGE
if(y - EXTEND >= 1 && y + EXTEND <= Nx && x - EXTEND >= 1 && x + EXTEND <= Ny && x - myDisp - EXTEND >= 1 && x - myDisp + EXTEND <= Ny)
Patch1 = ileft(y - EXTEND:y + EXTEND, x - EXTEND:x + EXTEND);
Patch2 = iright(y - EXTEND:y + EXTEND, x - myDisp - EXTEND:x - myDisp + EXTEND);
currNCC = NCC(Patch1, Patch2);
if(currNCC > bestNCC)
bestNCC = currNCC;
bestDisparity = myDisp;
end
end
end
%disp(bestDisparity)
DISPARITY(y,x, 1) = bestDisparity * 5;
DISPARITY(y,x, 2) = bestDisparity * 5;
DISPARITY(y,x, 3) = bestDisparity * 5;
end
end
% Return the scaled disparity map
outputImage = DISPARITY;
end
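% Minimal normalized cross-correlation helper (an illustrative sketch; it takes
% precedence over any NCC.m on the path, so remove it if the project supplies
% its own implementation).
function score = NCC(patch1, patch2)
% Zero-mean both patches and return their normalized dot product in [-1, 1].
p1 = patch1(:) - mean(patch1(:));
p2 = patch2(:) - mean(patch2(:));
denom = norm(p1) * norm(p2);
if denom == 0
score = 0;
else
score = (p1' * p2) / denom;
end
end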
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
SimpleSkySegmentation.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_1/project_1/Functions/SimpleSkySegmentation.m
| 1,708 |
utf_8
|
aac6dec37084561767bdb82c8459d3ed
|
imageMatrix1 = imread('detectSky1.bmp');
imageMatrix2 = imread('detectSky2.bmp');
imageMatrix3 = imread('detectSky3.bmp');
outputImage1 = segmentation(imageMatrix1);
outputImage2 = segmentation(imageMatrix2);
outputImage3 = segmentation(imageMatrix3);
imwrite(outputImage1, 'Outputs/simpleskydetection1.png', 'png');
imwrite(outputImage2, 'Outputs/simpleskydetection2.png', 'png');
imwrite(outputImage3, 'Outputs/simpleskydetection3.png', 'png');
figure(2);
subplot(3,2,1); imagesc(imageMatrix1);
subplot(3,2,2); imagesc(outputImage1);
subplot(3,2,3); imagesc(imageMatrix2);
subplot(3,2,4); imagesc(outputImage2);
subplot(3,2,5); imagesc(imageMatrix3);
subplot(3,2,6); imagesc(outputImage3);
function outputImage = segmentation(imageMatrix)
R_MIN = 0;
R_MAX = 100;
G_MIN = 1;
G_MAX = 150;
B_MIN = 100;
B_MAX = 255;
sizeX = size(imageMatrix,1);
sizeY = size(imageMatrix,2);
outputImage = zeros(sizeX, sizeY, 3, 'uint8');
for i = 1:1:sizeX
for j = 1:1:sizeY
redValue = imageMatrix(i,j,1);
greenValue = imageMatrix(i,j,2);
blueValue = imageMatrix(i,j,3);
isSky = true;
if(redValue < R_MIN || redValue > R_MAX)
isSky = false;
end
if(greenValue < G_MIN || greenValue > G_MAX)
isSky = false;
end
if(blueValue < B_MIN || blueValue > B_MAX)
isSky = false;
end
if(isSky == true)
outputImage(i,j,1) = 255;
outputImage(i,j,2) = 255;
outputImage(i,j,3) = 255;
else
outputImage(i,j,1) = 0;
outputImage(i,j,2) = 0;
outputImage(i,j,3) = 0;
end
end
end
end
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
savepgm.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/savepgm.m
| 447 |
utf_8
|
b8fe9ed33cbd68ea4b83271b431e3667
|
%SAVEPGM Write a PGM format file
%
% SAVEPGM(filename, im)
%
% Saves the specified image array in a binary (P5) format PGM image file.
%
% SEE ALSO: loadpgm
%
% Copyright (c) Peter Corke, 1999 Machine Vision Toolbox for Matlab
% Peter Corke 1994
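% Example (illustrative):
%   savepgm('out.pgm', uint8(I));   % I is an intensity image with values 0..255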
function savepgm(fname, im)
fid = fopen(fname, 'w');
[r,c] = size(im');
fprintf(fid, 'P5\n');
fprintf(fid, '%d %d\n', r, c);
fprintf(fid, '255\n');
fwrite(fid, im', 'uchar');
fclose(fid);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
ginput4.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/ginput4.m
| 7,121 |
utf_8
|
1d7231b0daed3533514a77f79f4e096a
|
function [out1,out2,out3] = ginput4(arg1)
[out1,out2,out3] = ginput(arg1);
return;
%GINPUT Graphical input from mouse.
% [X,Y] = GINPUT(N) gets N points from the current axes and returns
% the X- and Y-coordinates in length N vectors X and Y. The cursor
% can be positioned using a mouse (or by using the Arrow Keys on some
% systems). Data points are entered by pressing a mouse button
% or any key on the keyboard except carriage return, which terminates
% the input before N points are entered.
%
% [X,Y] = GINPUT gathers an unlimited number of points until the
% return key is pressed.
%
% [X,Y,BUTTON] = GINPUT(N) returns a third result, BUTTON, that
% contains a vector of integers specifying which mouse button was
% used (1,2,3 from left) or ASCII numbers if a key on the keyboard
% was used.
%
% Examples:
% [x,y] = ginput;
%
% [x,y] = ginput(5);
%
% [x, y, button] = ginput(1);
%
% See also GTEXT, UIRESTORE, UISUSPEND, WAITFORBUTTONPRESS.
% Copyright 1984-2006 The MathWorks, Inc.
% $Revision: 5.32.4.9 $ $Date: 2006/12/20 07:19:10 $
P = NaN*ones(16,16);
P(1:15,1:15) = 2*ones(15,15);
P(2:14,2:14) = ones(13,13);
P(3:13,3:13) = NaN*ones(11,11);
P(6:10,6:10) = 2*ones(5,5);
P(7:9,7:9) = 1*ones(3,3);
out1 = []; out2 = []; out3 = []; y = [];
c = computer;
if ~strcmp(c(1:2),'PC')
tp = get(0,'TerminalProtocol');
else
tp = 'micro';
end
if ~strcmp(tp,'none') && ~strcmp(tp,'x') && ~strcmp(tp,'micro'),
if nargout == 1,
if nargin == 1,
out1 = trmginput(arg1);
else
out1 = trmginput;
end
elseif nargout == 2 || nargout == 0,
if nargin == 1,
[out1,out2] = trmginput(arg1);
else
[out1,out2] = trmginput;
end
if nargout == 0
out1 = [ out1 out2 ];
end
elseif nargout == 3,
if nargin == 1,
[out1,out2,out3] = trmginput(arg1);
else
[out1,out2,out3] = trmginput;
end
end
else
fig = gcf;
figure(gcf);
if nargin == 0
how_many = -1;
b = [];
else
how_many = arg1;
b = [];
if ischar(how_many) ...
|| size(how_many,1) ~= 1 || size(how_many,2) ~= 1 ...
|| ~(fix(how_many) == how_many) ...
|| how_many < 0
error('MATLAB:ginput:NeedPositiveInt', 'Requires a positive integer.')
end
if how_many == 0
ptr_fig = 0;
while(ptr_fig ~= fig)
ptr_fig = get(0,'PointerWindow');
end
scrn_pt = get(0,'PointerLocation');
loc = get(fig,'Position');
pt = [scrn_pt(1) - loc(1), scrn_pt(2) - loc(2)];
out1 = pt(1); y = pt(2);
elseif how_many < 0
error('MATLAB:ginput:InvalidArgument', 'Argument must be a positive integer.')
end
end
% Suspend figure functions
state = uisuspend(fig);
toolbar = findobj(allchild(fig),'flat','Type','uitoolbar');
if ~isempty(toolbar)
ptButtons = [uigettool(toolbar,'Plottools.PlottoolsOff'), ...
uigettool(toolbar,'Plottools.PlottoolsOn')];
ptState = get (ptButtons,'Enable');
set (ptButtons,'Enable','off');
end
%set(fig,'pointer','fullcrosshair');
set(fig,'Pointer','custom','PointerShapeCData',P,'PointerShapeHotSpot',[8,8]);
fig_units = get(fig,'units');
char = 0;
% We need to pump the event queue on unix
% before calling WAITFORBUTTONPRESS
drawnow
while how_many ~= 0
% Use no-side effect WAITFORBUTTONPRESS
waserr = 0;
try
keydown = wfbp;
catch
waserr = 1;
end
if(waserr == 1)
if(ishandle(fig))
set(fig,'units',fig_units);
uirestore(state);
error('MATLAB:ginput:Interrupted', 'Interrupted');
else
error('MATLAB:ginput:FigureDeletionPause', 'Interrupted by figure deletion');
end
end
ptr_fig = get(0,'CurrentFigure');
if(ptr_fig == fig)
if keydown
char = get(fig, 'CurrentCharacter');
button = abs(get(fig, 'CurrentCharacter'));
scrn_pt = get(0, 'PointerLocation');
set(fig,'units','pixels')
loc = get(fig, 'Position');
% We need to compensate for an off-by-one error:
pt = [scrn_pt(1) - loc(1) + 1, scrn_pt(2) - loc(2) + 1];
set(fig,'CurrentPoint',pt);
else
button = get(fig, 'SelectionType');
if strcmp(button,'open')
button = 1;
elseif strcmp(button,'normal')
button = 1;
elseif strcmp(button,'extend')
button = 2;
elseif strcmp(button,'alt')
button = 3;
else
error('MATLAB:ginput:InvalidSelection', 'Invalid mouse selection.')
end
end
pt = get(gca, 'CurrentPoint');
how_many = how_many - 1;
if(char == 13) % & how_many ~= 0)
% if the return key was pressed, char will == 13,
% and that's our signal to break out of here whether
% or not we have collected all the requested data
% points.
% If this was an early breakout, don't include
% the <Return> key info in the return arrays.
% We will no longer count it if it's the last input.
break;
end
out1 = [out1;pt(1,1)];
y = [y;pt(1,2)];
b = [b;button];
end
end
uirestore(state);
if ~isempty(toolbar) && ~isempty(ptButtons)
set (ptButtons(1),'Enable',ptState{1});
set (ptButtons(2),'Enable',ptState{2});
end
set(fig,'units',fig_units);
if nargout > 1
out2 = y;
if nargout > 2
out3 = b;
end
else
out1 = [out1 y];
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function key = wfbp
%WFBP Replacement for WAITFORBUTTONPRESS that has no side effects.
fig = gcf;
current_char = [];
% Now wait for that buttonpress, and check for error conditions
waserr = 0;
try
h=findall(fig,'type','uimenu','accel','C'); % Disabling ^C for edit menu so the only ^C is for
set(h,'accel',''); % interrupting the function.
keydown = waitforbuttonpress;
current_char = double(get(fig,'CurrentCharacter')); % Capturing the character.
if~isempty(current_char) && (keydown == 1) % If the character was generated by the
if(current_char == 3) % current keypress AND is ^C, set 'waserr'to 1
waserr = 1; % so that it errors out.
end
end
set(h,'accel','C'); % Set back the accelerator for edit menu.
catch
waserr = 1;
end
drawnow;
if(waserr == 1)
set(h,'accel','C'); % Set back the accelerator if it errored out.
error('MATLAB:ginput:Interrupted', 'Interrupted');
end
if nargout>0, key = keydown; end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
loadinr.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/loadinr.m
| 1,029 |
utf_8
|
ac39329cc5acba186f4c5ef4c62f3a33
|
%LOADINR Load an INRIMAGE format file
%
% LOADINR(filename, im)
%
% Load an INRIA image format file and return it as a matrix
%
% SEE ALSO: saveinr
%
% Copyright (c) Peter Corke, 1999 Machine Vision Toolbox for Matlab
% Peter Corke 1996
function im = loadinr(fname, im)
fid = fopen(fname, 'r');
s = fgets(fid);
if strcmp(s(1:12), '#INRIMAGE-4#') == 0,
error('not INRIMAGE format');
end
% not very complete, only looks for the X/YDIM keys
while 1,
s = fgets(fid);
n = length(s) - 1;
if s(1) == '#',
break
end
if strcmp(s(1:5), 'XDIM='),
cols = str2num(s(6:n));
end
if strcmp(s(1:5), 'YDIM='),
rows = str2num(s(6:n));
end
if strcmp(s(1:4), 'CPU='),
if strcmp(s(5:n), 'sun') == 0,
error('not sun data ordering');
end
end
end
disp(['INRIMAGE format file ' num2str(rows) ' x ' num2str(cols)])
% now the binary data
fseek(fid, 256, 'bof');
[im count] = fread(fid, [cols rows], 'float32');
im = im';
if count ~= (rows*cols),
error('file too short');
end
fclose(fid);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
saveppm.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/saveppm.m
| 722 |
utf_8
|
9904ad3d075a120ca32bd9c10e019512
|
%SAVEPPM Write a PPM format file
%
% SAVEPPM(filename, I)
%
% Saves the specified red, green and blue planes in a binary (P6)
% format PPM image file.
%
% SEE ALSO: loadppm
%
% Copyright (c) Peter Corke, 1999 Machine Vision Toolbox for Matlab
% Peter Corke 1994
function saveppm(fname, I)
I = double(I);
if size(I,3) == 1,
R = I;
G = I;
B = I;
else
R = I(:,:,1);
G = I(:,:,2);
B = I(:,:,3);
end;
%keyboard;
fid = fopen(fname, 'w');
[r,c] = size(R');
fprintf(fid, 'P6\n');
fprintf(fid, '%d %d\n', r, c);
fprintf(fid, '255\n');
R = R';
G = G';
B = B';
im = [R(:) G(:) B(:)];
%im = reshape(im,r,c*3);
im = im';
%im = im(:);
fwrite(fid, im, 'uchar');
fclose(fid);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
ginput3.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/ginput3.m
| 6,344 |
utf_8
|
1cc27af57f9872f05bbf0d9b8a0fdbc9
|
function [out1,out2,out3] = ginput2(arg1)
%GINPUT Graphical input from mouse.
% [X,Y] = GINPUT(N) gets N points from the current axes and returns
% the X- and Y-coordinates in length N vectors X and Y. The cursor
% can be positioned using a mouse (or by using the Arrow Keys on some
% systems). Data points are entered by pressing a mouse button
% or any key on the keyboard except carriage return, which terminates
% the input before N points are entered.
%
% [X,Y] = GINPUT gathers an unlimited number of points until the
% return key is pressed.
%
% [X,Y,BUTTON] = GINPUT(N) returns a third result, BUTTON, that
% contains a vector of integers specifying which mouse button was
% used (1,2,3 from left) or ASCII numbers if a key on the keyboard
% was used.
% Copyright (c) 1984-96 by The MathWorks, Inc.
% $Revision: 5.18 $ $Date: 1996/11/10 17:48:08 $
% Fixed version by Jean-Yves Bouguet to have a cross instead of 2 lines
% More visible for images
P = NaN*ones(16,16);
P(1:15,1:15) = 2*ones(15,15);
P(2:14,2:14) = ones(13,13);
P(3:13,3:13) = NaN*ones(11,11);
P(6:10,6:10) = 2*ones(5,5);
P(7:9,7:9) = 1*ones(3,3);
out1 = []; out2 = []; out3 = []; y = [];
c = computer;
if ~strcmp(c(1:2),'PC') & ~strcmp(c(1:2),'MA')
tp = get(0,'TerminalProtocol');
else
tp = 'micro';
end
if ~strcmp(tp,'none') & ~strcmp(tp,'x') & ~strcmp(tp,'micro'),
if nargout == 1,
if nargin == 1,
eval('out1 = trmginput(arg1);');
else
eval('out1 = trmginput;');
end
elseif nargout == 2 | nargout == 0,
if nargin == 1,
eval('[out1,out2] = trmginput(arg1);');
else
eval('[out1,out2] = trmginput;');
end
if nargout == 0
out1 = [ out1 out2 ];
end
elseif nargout == 3,
if nargin == 1,
eval('[out1,out2,out3] = trmginput(arg1);');
else
eval('[out1,out2,out3] = trmginput;');
end
end
else
fig = gcf;
figure(gcf);
if nargin == 0
how_many = -1;
b = [];
else
how_many = arg1;
b = [];
if isstr(how_many) ...
| size(how_many,1) ~= 1 | size(how_many,2) ~= 1 ...
| ~(fix(how_many) == how_many) ...
| how_many < 0
error('Requires a positive integer.')
end
if how_many == 0
ptr_fig = 0;
while(ptr_fig ~= fig)
ptr_fig = get(0,'PointerWindow');
end
scrn_pt = get(0,'PointerLocation');
loc = get(fig,'Position');
pt = [scrn_pt(1) - loc(1), scrn_pt(2) - loc(2)];
out1 = pt(1); y = pt(2);
elseif how_many < 0
error('Argument must be a positive integer.')
end
end
pointer = get(gcf,'pointer');
set(gcf,'Pointer','custom','PointerShapeCData',P,'PointerShapeHotSpot',[8,8]);
%set(gcf,'pointer','crosshair');
fig_units = get(fig,'units');
char = 0;
while how_many ~= 0
% Use no-side effect WAITFORBUTTONPRESS
waserr = 0;
eval('keydown = wfbp;', 'waserr = 1;');
if(waserr == 1)
if(ishandle(fig))
set(fig,'pointer',pointer,'units',fig_units);
error('Interrupted');
else
error('Interrupted by figure deletion');
end
end
ptr_fig = get(0,'CurrentFigure');
if(ptr_fig == fig)
if keydown
char = get(fig, 'CurrentCharacter');
button = abs(get(fig, 'CurrentCharacter'));
scrn_pt = get(0, 'PointerLocation');
set(fig,'units','pixels')
loc = get(fig, 'Position');
pt = [scrn_pt(1) - loc(1), scrn_pt(2) - loc(2)];
set(fig,'CurrentPoint',pt);
else
button = get(fig, 'SelectionType');
if strcmp(button,'open')
button = 1; %b(length(b));
elseif strcmp(button,'normal')
button = 1;
elseif strcmp(button,'extend')
button = 2;
elseif strcmp(button,'alt')
button = 3;
else
error('Invalid mouse selection.')
end
end
pt = get(gca, 'CurrentPoint');
how_many = how_many - 1;
if(char == 13) % & how_many ~= 0)
% if the return key was pressed, char will == 13,
% and that's our signal to break out of here whether
% or not we have collected all the requested data
% points.
% If this was an early breakout, don't include
% the <Return> key info in the return arrays.
% We will no longer count it if it's the last input.
break;
end
out1 = [out1;pt(1,1)];
y = [y;pt(1,2)];
b = [b;button];
end
end
set(fig,'pointer',pointer,'units',fig_units);
if nargout > 1
out2 = y;
if nargout > 2
out3 = b;
end
else
out1 = [out1 y];
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function key = wfbp
%WFBP Replacement for WAITFORBUTTONPRESS that has no side effects.
% Remove figure button functions
fprops = {'windowbuttonupfcn','buttondownfcn', ...
'windowbuttondownfcn','windowbuttonmotionfcn'};
fig = gcf;
fvals = get(fig,fprops);
set(fig,fprops,{'','','',''})
% Remove all other buttondown functions
ax = findobj(fig,'type','axes');
if isempty(ax)
ch = {};
else
ch = get(ax,{'Children'});
end
for i=1:length(ch),
ch{i} = ch{i}(:)';
end
h = [ax(:)',ch{:}];
vals = get(h,{'buttondownfcn'});
mt = repmat({''},size(vals));
set(h,{'buttondownfcn'},mt);
% Now wait for that buttonpress, and check for error conditions
waserr = 0;
eval(['if nargout==0,', ...
' waitforbuttonpress,', ...
'else,', ...
' keydown = waitforbuttonpress;',...
'end' ], 'waserr = 1;');
% Put everything back
if(ishandle(fig))
set(fig,fprops,fvals)
set(h,{'buttondownfcn'},vals)
end
if(waserr == 1)
error('Interrupted');
end
if nargout>0, key = keydown; end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
ginput2.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/ginput2.m
| 6,105 |
utf_8
|
983a72db9a079ba54ab084149ced6ae9
|
function [out1,out2,out3] = ginput2(arg1)
%GINPUT Graphical input from mouse.
% [X,Y] = GINPUT(N) gets N points from the current axes and returns
% the X- and Y-coordinates in length N vectors X and Y. The cursor
% can be positioned using a mouse (or by using the Arrow Keys on some
% systems). Data points are entered by pressing a mouse button
% or any key on the keyboard except carriage return, which terminates
% the input before N points are entered.
%
% [X,Y] = GINPUT gathers an unlimited number of points until the
% return key is pressed.
%
% [X,Y,BUTTON] = GINPUT(N) returns a third result, BUTTON, that
% contains a vector of integers specifying which mouse button was
% used (1,2,3 from left) or ASCII numbers if a key on the keyboard
% was used.
% Copyright (c) 1984-96 by The MathWorks, Inc.
% $Revision: 5.18 $ $Date: 1996/11/10 17:48:08 $
% Fixed version by Jean-Yves Bouguet to have a cross instead of 2 lines
% More visible for images
out1 = []; out2 = []; out3 = []; y = [];
c = computer;
if ~strcmp(c(1:2),'PC') & ~strcmp(c(1:2),'MA')
tp = get(0,'TerminalProtocol');
else
tp = 'micro';
end
if ~strcmp(tp,'none') & ~strcmp(tp,'x') & ~strcmp(tp,'micro'),
if nargout == 1,
if nargin == 1,
eval('out1 = trmginput(arg1);');
else
eval('out1 = trmginput;');
end
elseif nargout == 2 | nargout == 0,
if nargin == 1,
eval('[out1,out2] = trmginput(arg1);');
else
eval('[out1,out2] = trmginput;');
end
if nargout == 0
out1 = [ out1 out2 ];
end
elseif nargout == 3,
if nargin == 1,
eval('[out1,out2,out3] = trmginput(arg1);');
else
eval('[out1,out2,out3] = trmginput;');
end
end
else
fig = gcf;
figure(gcf);
if nargin == 0
how_many = -1;
b = [];
else
how_many = arg1;
b = [];
if isstr(how_many) ...
| size(how_many,1) ~= 1 | size(how_many,2) ~= 1 ...
| ~(fix(how_many) == how_many) ...
| how_many < 0
error('Requires a positive integer.')
end
if how_many == 0
ptr_fig = 0;
while(ptr_fig ~= fig)
ptr_fig = get(0,'PointerWindow');
end
scrn_pt = get(0,'PointerLocation');
loc = get(fig,'Position');
pt = [scrn_pt(1) - loc(1), scrn_pt(2) - loc(2)];
out1 = pt(1); y = pt(2);
elseif how_many < 0
error('Argument must be a positive integer.')
end
end
pointer = get(gcf,'pointer');
set(gcf,'pointer','crosshair');
fig_units = get(fig,'units');
char = 0;
while how_many ~= 0
% Use no-side effect WAITFORBUTTONPRESS
waserr = 0;
eval('keydown = wfbp;', 'waserr = 1;');
if(waserr == 1)
if(ishandle(fig))
set(fig,'pointer',pointer,'units',fig_units);
error('Interrupted');
else
error('Interrupted by figure deletion');
end
end
ptr_fig = get(0,'CurrentFigure');
if(ptr_fig == fig)
if keydown
char = get(fig, 'CurrentCharacter');
button = abs(get(fig, 'CurrentCharacter'));
scrn_pt = get(0, 'PointerLocation');
set(fig,'units','pixels')
loc = get(fig, 'Position');
pt = [scrn_pt(1) - loc(1), scrn_pt(2) - loc(2)];
set(fig,'CurrentPoint',pt);
else
button = get(fig, 'SelectionType');
if strcmp(button,'open')
button = 1; %b(length(b));
elseif strcmp(button,'normal')
button = 1;
elseif strcmp(button,'extend')
button = 2;
elseif strcmp(button,'alt')
button = 3;
else
error('Invalid mouse selection.')
end
end
pt = get(gca, 'CurrentPoint');
how_many = how_many - 1;
if(char == 13) % & how_many ~= 0)
% if the return key was pressed, char will == 13,
% and that's our signal to break out of here whether
% or not we have collected all the requested data
% points.
% If this was an early breakout, don't include
% the <Return> key info in the return arrays.
% We will no longer count it if it's the last input.
break;
end
out1 = [out1;pt(1,1)];
y = [y;pt(1,2)];
b = [b;button];
end
end
set(fig,'pointer',pointer,'units',fig_units);
if nargout > 1
out2 = y;
if nargout > 2
out3 = b;
end
else
out1 = [out1 y];
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function key = wfbp
%WFBP Replacement for WAITFORBUTTONPRESS that has no side effects.
% Remove figure button functions
fprops = {'windowbuttonupfcn','buttondownfcn', ...
'windowbuttondownfcn','windowbuttonmotionfcn'};
fig = gcf;
fvals = get(fig,fprops);
set(fig,fprops,{'','','',''})
% Remove all other buttondown functions
ax = findobj(fig,'type','axes');
if isempty(ax)
ch = {};
else
ch = get(ax,{'Children'});
end
for i=1:length(ch),
ch{i} = ch{i}(:)';
end
h = [ax(:)',ch{:}];
vals = get(h,{'buttondownfcn'});
mt = repmat({''},size(vals));
set(h,{'buttondownfcn'},mt);
% Now wait for that buttonpress, and check for error conditions
waserr = 0;
eval(['if nargout==0,', ...
' waitforbuttonpress,', ...
'else,', ...
' keydown = waitforbuttonpress;',...
'end' ], 'waserr = 1;');
% Put everything back
if(ishandle(fig))
set(fig,fprops,fvals)
set(h,{'buttondownfcn'},vals)
end
if(waserr == 1)
error('Interrupted');
end
if nargout>0, key = keydown; end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
loadppm.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/loadppm.m
| 2,356 |
utf_8
|
341aee7d75f529ff3425160291592356
|
%LOADPPM Load a PPM image
%
% I = loadppm(filename)
%
% Returns a matrix containing the image loaded from the PPM format
% file filename. Handles ASCII (P3) and binary (P6) PPM file formats.
%
% If the filename has no extension, and open fails, a '.ppm' and
% '.pnm' extension will be tried.
%
% SEE ALSO: saveppm loadpgm
%
% Copyright (c) Peter Corke, 1999 Machine Vision Toolbox for Matlab
% Peter Corke 1994
function I = loadppm(file)
white = [' ' 9 10 13]; % space, tab, lf, cr
white = setstr(white);
fid = fopen(file, 'r');
if fid < 0,
fid = fopen([file '.ppm'], 'r');
end
if fid < 0,
fid = fopen([file '.pnm'], 'r');
end
if fid < 0,
error('Couldn''t open file');
end
magic = fread(fid, 2, 'char');
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
cols = fscanf(fid, '%d', 1);
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
rows = fscanf(fid, '%d', 1);
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
maxval = fscanf(fid, '%d', 1);
% assume a carriage return only:
c = fread(fid,1,'char');
% bug: because the image might be starting with special characters!
%while 1
% c = fread(fid,1,'char');
% if c == '#',
% fgetl(fid);
% elseif ~any(c == white)
% fseek(fid, -1, 'cof'); % unputc()
% break;
% end
%end
if magic(1) == 'P',
if magic(2) == '3',
%disp(['ASCII PPM file ' num2str(rows) ' x ' num2str(cols)])
I = fscanf(fid, '%d', [cols*3 rows]);
elseif magic(2) == '6',
%disp(['Binary PPM file ' num2str(rows) ' x ' num2str(cols)])
if maxval == 1,
fmt = 'ubit1';
elseif maxval == 15,
fmt = 'ubit4';
elseif maxval == 255,
fmt = 'uint8';
elseif maxval == 2^32-1,
fmt = 'uint32';
end
I = fread(fid, [cols*3 rows], fmt);
else
disp('Not a PPM file');
end
end
%
% now the matrix has interleaved columns of R, G, B
%
I = I';
size(I);
R = I(:,1:3:(cols*3));
G = I(:,2:3:(cols*3));
B = I(:,3:3:(cols*3));
fclose(fid);
I = zeros(rows,cols,3);
I(:,:,1) = R;
I(:,:,2) = G;
I(:,:,3) = B;
I = uint8(I);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
saveinr.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/saveinr.m
| 949 |
utf_8
|
a18df4fba021be006842fbc35166bc23
|
%SAVEINR Write an INRIMAGE format file
%
% SAVEINR(filename, im)
%
% Saves the specified image array in a INRIA image format file.
%
% SEE ALSO: loadinr
%
% Copyright (c) Peter Corke, 1999 Machine Vision Toolbox for Matlab
% Peter Corke 1996
function saveinr(fname, im)
fid = fopen(fname, 'w');
[r,c] = size(im');
% build the header
hdr = [];
s = sprintf('#INRIMAGE-4#{\n');
hdr = [hdr s];
s = sprintf('XDIM=%d\n',c);
hdr = [hdr s];
s = sprintf('YDIM=%d\n',r);
hdr = [hdr s];
s = sprintf('ZDIM=1\n');
hdr = [hdr s];
s = sprintf('VDIM=1\n');
hdr = [hdr s];
s = sprintf('TYPE=float\n');
hdr = [hdr s];
s = sprintf('PIXSIZE=32\n');
hdr = [hdr s];
s = sprintf('SCALE=2**0\n');
hdr = [hdr s];
s = sprintf('CPU=sun\n#');
hdr = [hdr s];
% make it 256 bytes long and write it
hdr256 = zeros(1,256);
hdr256(1:length(hdr)) = hdr;
fwrite(fid, hdr256, 'uchar');
% now the binary data
fwrite(fid, im', 'float32');
fclose(fid)
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
stereo_gui.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/stereo_gui.m
| 6,208 |
utf_8
|
6cc48675fdf9c8c36bc147da7d046d06
|
% stereo_gui
% Stereo Camera Calibration Toolbox (two cameras, internal and external calibration):
%
% It is assumed that the two cameras (left and right) have been calibrated with the pattern at the same 3D locations, and the same points
% on the pattern (select the same grid points). Therefore, in particular, the same number of images were used to calibrate both cameras.
% The two calibration result files must have been saved under two separate data files (Calib_Results_left.mat and Calib_Results_right.mat)
% prior to running this toolbox. To generate the two files, run the classic Camera Calibration toolbox calib.m.
%
% INPUT: Calib_Results_left.mat, Calib_Results_right.mat -> Generated by the standard calibration toolbox on the two cameras individually
% OUTPUT: Calib_Results_stereo.mat -> The saved result after global stereo calibration (after running stereo calibration, and hitting Save stereo calib results)
%
% Main result variables stored in Calib_Results_stereo.mat:
% om, R, T: relative rotation and translation of the right camera wrt the left camera
% fc_left, cc_left, kc_left, alpha_c_left, KK_left: New intrinsic parameters of the left camera
% fc_right, cc_right, kc_right, alpha_c_right, KK_right: New intrinsic parameters of the right camera
%
% Both sets of intrinsic parameters are equivalent to the classical {fc,cc,kc,alpha_c,KK} described online at:
% http://www.vision.caltech.edu/bouguetj/calib_doc/parameters.html
%
% Note: If you do not want to recompute the intrinsic parameters through stereo calibration, you may want to set
% recompute_intrinsic_right and recompute_intrinsic_left to zero, prior to running stereo calibration. Default: 1
%
% Definition of the extrinsic parameters: R and om are related through the rodrigues formula (R=rodrigues(om)).
% Consider a point P of coordinates XL and XR in the left and right camera reference frames respectively.
% XL and XR are related to each other through the following rigid motion transformation:
% XR = R * XL + T
% R and T (or equivalently om and T) fully describe the relative displacement of the two cameras.
%
%
% If the Warning message "Disabling view kk - Reason: the left and right images are found inconsistent" is encountered during stereo calibration,
% that probably means that for the kkth pair of images, the left and right images are found to have captured the calibration pattern at two
% different locations in space. That means that the two views are not consistent, and therefore cannot be used for stereo calibration.
% When capturing your images, make sure that you do not move the calibration pattern between capturing the left and the right images.
% The pattern can (and should) be moved in space only between two sets of (left,right) images.
% Another reason for inconsistency is that you selected a different set of points on the pattern when running the separate calibrations
% (leading to the two files Calib_Results_left.mat and Calib_Results_right.mat). Make sure that the same points are selected in the
% two separate calibrations. In other words, the points need to correspond.
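% Worked example of the extrinsic convention (an illustrative sketch, after loading
% Calib_Results_stereo.mat):
%   R  = rodrigues(om);       % 3x3 rotation of the right camera wrt the left
%   XL = [0.1; 0.2; 1.5];     % a 3D point expressed in the left camera frame
%   XR = R * XL + T;          % the same point expressed in the right camera frame
%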
% (c) Jean-Yves Bouguet - Intel Corporation
% October 25, 2001 -- Last updated June 14, 2004
function stereo_gui,
cell_list = {};
%-------- Begin editable region -------------%
%-------- Begin editable region -------------%
fig_number = 1;
title_figure = 'Stereo Camera Calibration Toolbox';
cell_list{1,1} = {'Load left and right calibration files','load_stereo_calib_files;'};
cell_list{1,2} = {'Run stereo calibration','go_calib_stereo;'};
cell_list{2,1} = {'Show Extrinsics of stereo rig','ext_calib_stereo;'};
cell_list{2,2} = {'Show Intrinsic parameters','show_stereo_calib_results;'};
cell_list{3,1} = {'Save stereo calib results','saving_stereo_calib;'};
cell_list{3,2} = {'Load stereo calib results','loading_stereo_calib;'};
cell_list{4,1} = {'Rectify the calibration images','rectify_stereo_pair;'};
cell_list{4,2} = {'Exit',['disp(''Bye. To run again, type stereo_gui.''); close(' num2str(fig_number) ');']}; %{'Exit','calib_gui;'};
show_window(cell_list,fig_number,title_figure,150,14);
%-------- End editable region -------------%
%-------- End editable region -------------%
%------- DO NOT EDIT ANYTHING BELOW THIS LINE -----------%
function show_window(cell_list,fig_number,title_figure,x_size,y_size,gap_x,font_name,font_size)
if ~exist('cell_list'),
error('No description of the functions');
end;
if ~exist('fig_number'),
fig_number = 1;
end;
if ~exist('title_figure'),
title_figure = '';
end;
if ~exist('x_size'),
x_size = 85;
end;
if ~exist('y_size'),
y_size = 14;
end;
if ~exist('gap_x'),
gap_x = 0;
end;
if ~exist('font_name'),
font_name = 'clean';
end;
if ~exist('font_size'),
font_size = 8;
end;
figure(fig_number); clf;
pos = get(fig_number,'Position');
[n_row,n_col] = size(cell_list);
fig_size_x = x_size*n_col+(n_col+1)*gap_x;
fig_size_y = y_size*n_row+(n_row+1)*gap_x;
set(fig_number,'Units','points', ...
'BackingStore','off', ...
'Color',[0.8 0.8 0.8], ...
'MenuBar','none', ...
'Resize','off', ...
'Name',title_figure, ...
'Position',[pos(1) pos(2) fig_size_x fig_size_y], ...
'NumberTitle','off'); %,'WindowButtonMotionFcn',['figure(' num2str(fig_number) ');']);
h_mat = zeros(n_row,n_col);
posx = zeros(n_row,n_col);
posy = zeros(n_row,n_col);
for i=n_row:-1:1,
for j = n_col:-1:1,
posx(i,j) = gap_x+(j-1)*(x_size+gap_x);
posy(i,j) = fig_size_y - i*(gap_x+y_size);
end;
end;
for i=n_row:-1:1,
for j = n_col:-1:1,
if ~isempty(cell_list{i,j}),
if ~isempty(cell_list{i,j}{1}) & ~isempty(cell_list{i,j}{2}),
h_mat(i,j) = uicontrol('Parent',fig_number, ...
'Units','points', ...
'Callback',cell_list{i,j}{2}, ...
'ListboxTop',0, ...
'Position',[posx(i,j) posy(i,j) x_size y_size], ...
'String',cell_list{i,j}{1}, ...
'fontsize',font_size,...
'fontname',font_name,...
'Tag','Pushbutton1');
end;
end;
end;
end;
%------ END PROTECTED REGION ----------------%
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
loadpgm.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Functions/toolbox_calib/TOOLBOX_calib/loadpgm.m
| 1,838 |
utf_8
|
6ec18330c2633d5519c72eb2e6fe963b
|
%LOADPGM Load a PGM image
%
% I = loadpgm(filename)
%
% Returns a matrix containing the image loaded from the PGM format
% file filename. Handles ASCII (P2) and binary (P5) PGM file formats.
%
% If the filename has no extension, and open fails, a '.pgm' will
% be appended.
%
%
% Copyright (c) Peter Corke, 1999 Machine Vision Toolbox for Matlab
% Peter Corke 1994
function I = loadpgm(file)
white = [' ' 9 10 13]; % space, tab, lf, cr
white = setstr(white);
fid = fopen(file, 'r');
if fid < 0,
fid = fopen([file '.pgm'], 'r');
end
if fid < 0,
error('Couldn''t open file');
end
magic = fread(fid, 2, 'char');
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
cols = fscanf(fid, '%d', 1);
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
rows = fscanf(fid, '%d', 1);
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
maxval = fscanf(fid, '%d', 1);
while 1
c = fread(fid,1,'char');
if c == '#',
fgetl(fid);
elseif ~any(c == white)
fseek(fid, -1, 'cof'); % unputc()
break;
end
end
if magic(1) == 'P',
if magic(2) == '2',
%disp(['ASCII PGM file ' num2str(rows) ' x ' num2str(cols)])
I = fscanf(fid, '%d', [cols rows])';
elseif magic(2) == '5',
%disp(['Binary PGM file ' num2str(rows) ' x ' num2str(cols)])
if maxval == 1,
fmt = 'ubit1';
elseif maxval == 15,
fmt = 'ubit4';
elseif maxval == 255,
fmt = 'uint8';
elseif maxval == 2^32-1,
fmt = 'uint32';
end
I = fread(fid, [cols rows], fmt)';
else
disp('Not a PGM file');
end
end
fclose(fid);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
Quaternion2R.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/CODE/Quaternion2R.m
| 342 |
utf_8
|
0dc39a43367f00b5830e73144bf55f7c
|
function R = Quaternion2R(q)
q = q / norm(q);
R = [
q(1)^2 + q(2)^2 - q(3)^2 - q(4)^2, 2*(q(2)*q(3) - q(1)*q(4)), 2*(q(2)*q(4) + q(1)*q(3));
2*(q(2)*q(3) + q(1)*q(4)), q(1)^2-q(2)^2 + q(3)^2 - q(4)^2, 2*(q(3)*q(4) - q(1)*q(2));
2*(q(2)*q(4) - q(1)*q(3)), 2*(q(3)*q(4) + q(1)*q(2)), q(1)^2 - q(2)^2 - q(3)^2 + q(4)^2;
];
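% Example (illustrative): the identity quaternion maps to the identity matrix,
% and q and -q yield the same rotation.
%   R = Quaternion2R([1; 0; 0; 0]);   % returns eye(3)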
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
Register3DPointsQuaternion.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/CODE/Register3DPointsQuaternion.m
| 1,501 |
utf_8
|
6535ceb941775580a6874cc4223f7f0c
|
% compute transformation from pointsA and pointsB so that
% pointsB = R * pointsA + t
function finalTrans = Register3DPointsQuaternion(pointsA, pointsB)
% pointsA, pointsB - 3 x n matrices.
% clear all; close all; clc;
%
% pointsA = [5 6 8; 10 2 3; 18 9 10]';
%
% trueRotMat = RPY2Rot(10, 15, 30);
% trueTransVec = [10 7 33]';
% trueTrans = RT2Trans(trueRotMat, trueTransVec);
% truePose = Trans2Pose(trueTrans)'
%
% pointsB = trueTrans * [pointsA; ones(1, size(pointsA, 2))];
% pointsB = pointsB(1:3,:) ./ repmat(pointsB(4,:), 3, 1);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
numPoints = size(pointsA, 2);
% compute centroids of both point sets
centroidA = mean(pointsA, 2);
centroidB = mean(pointsB, 2);
% find rotation
pA = pointsA - repmat(centroidA, 1, numPoints);
pB = pointsB - repmat(centroidB, 1, numPoints);
M = zeros(3, 3);
for i=1:numPoints
M = M + pA(:,i) * pB(:,i)';
end
N = [
M(1,1)+M(2,2)+M(3,3), M(2,3)-M(3,2), M(3,1)-M(1,3), M(1,2)-M(2,1);
M(2,3)-M(3,2), M(1,1)-M(2,2)-M(3,3), M(1,2)+M(2,1), M(3,1)+M(1,3);
M(3,1)-M(1,3), M(1,2)+M(2,1), -M(1,1)+M(2,2)-M(3,3), M(2,3)+M(3,2);
M(1,2)-M(2,1), M(3,1)+M(1,3), M(2,3)+M(3,2), -M(1,1)-M(2,2)+M(3,3);
];
[V, D] = eig(N);
DVec = diag(D);
% sortedDVec = sort(DVec);
maxIdx = find(DVec == max(DVec));
qmin = V(:, maxIdx);
R = Quaternion2R(qmin);
% find translation given rotation
rotPointsA = R * pointsA;
rotCentroidA = mean(rotPointsA, 2);
t = centroidB - rotCentroidA;
finalTrans = RT2Trans(R, t);
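% Example (an illustrative sketch; RT2Trans is assumed to assemble the 4x4
% homogeneous transform [R t; 0 0 0 1]):
%   A  = rand(3, 10);
%   R0 = Quaternion2R([cos(pi/8); 0; 0; sin(pi/8)]); t0 = [1; 2; 3];
%   B  = R0 * A + repmat(t0, 1, 10);
%   T  = Register3DPointsQuaternion(A, B);   % recovers R0 and t0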
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
appendimages.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/siftDemoV4/siftDemoV4/appendimages.m
| 461 |
utf_8
|
a7ad42558236d4f7bd90dc6e72631d54
|
% im = appendimages(image1, image2)
%
% Return a new image that appends the two images side-by-side.
function im = appendimages(image1, image2)
% Select the image with the fewest rows and fill in enough empty rows
% to make it the same height as the other image.
rows1 = size(image1,1);
rows2 = size(image2,1);
if (rows1 < rows2)
image1(rows2,1) = 0;
else
image2(rows1,1) = 0;
end
% Now append both images side-by-side.
im = [image1 image2];
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
showkeys.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/siftDemoV4/siftDemoV4/showkeys.m
| 1,699 |
utf_8
|
4e67466c0fd7739350cb2af5767e10a4
|
% showkeys(image, locs)
%
% This function displays an image with SIFT keypoints overlayed.
% Input parameters:
% image: the image array returned by sift (grayscale)
% locs: matrix in which each row gives a keypoint location (row,
% column, scale, orientation)
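% Example (illustrative; the image file name is an assumption):
%   [im, descriptors, locs] = sift('scene.pgm');
%   showkeys(im, locs);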
function showkeys(image, locs)
disp('Drawing SIFT keypoints ...');
% Draw image with keypoints
figure('Position', [50 50 size(image,2) size(image,1)]);
colormap('gray');
imagesc(image);
hold on;
imsize = size(image);
for i = 1: size(locs,1)
% Draw an arrow, each line transformed according to keypoint parameters.
TransformLine(imsize, locs(i,:), 0.0, 0.0, 1.0, 0.0);
TransformLine(imsize, locs(i,:), 0.85, 0.1, 1.0, 0.0);
TransformLine(imsize, locs(i,:), 0.85, -0.1, 1.0, 0.0);
end
hold off;
% ------ Subroutine: TransformLine -------
% Draw the given line in the image, but first translate, rotate, and
% scale according to the keypoint parameters.
%
% Parameters:
% Arrays:
% imsize = [rows columns] of image
% keypoint = [subpixel_row subpixel_column scale orientation]
%
% Scalars:
% x1, y1; beginning of vector
% x2, y2; ending of vector
function TransformLine(imsize, keypoint, x1, y1, x2, y2)
% The scaling of the unit length arrow is set to approximately the radius
% of the region used to compute the keypoint descriptor.
len = 6 * keypoint(3);
% Rotate the keypoints by 'ori' = keypoint(4)
s = sin(keypoint(4));
c = cos(keypoint(4));
% Apply transform
r1 = keypoint(1) - len * (c * y1 + s * x1);
c1 = keypoint(2) + len * (- s * y1 + c * x1);
r2 = keypoint(1) - len * (c * y2 + s * x2);
c2 = keypoint(2) + len * (- s * y2 + c * x2);
line([c1 c2], [r1 r2], 'Color', 'c');
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
sift.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/siftDemoV4/siftDemoV4/sift.m
| 2,496 |
utf_8
|
7cdcf3bcc06643a2ec205788c77ac597
|
% [image, descriptors, locs] = sift(imageFile)
%
% This function reads an image and returns its SIFT keypoints.
% Input parameters:
% imageFile: the file name for the image.
%
% Returned:
% image: the image array in double format
% descriptors: a K-by-128 matrix, where each row gives an invariant
% descriptor for one of the K keypoints. The descriptor is a vector
% of 128 values normalized to unit length.
% locs: K-by-4 matrix, in which each row has the 4 values for a
% keypoint location (row, column, scale, orientation). The
% orientation is in the range [-PI, PI] radians.
%
% Credits: Thanks for initial version of this program to D. Alvaro and
% J.J. Guerrero, Universidad de Zaragoza (modified by D. Lowe)
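%
% Usage sketch (illustrative only; the image file name is a placeholder):
%   [im, descs, locs] = sift('scene.pgm');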
function [image, descriptors, locs] = sift(imageFile)
% Load image
image = imread(imageFile);
% If you have the Image Processing Toolbox, the following lines allow input
% of color images, which will be converted to grayscale.
if size(image, 3) == 3
image = rgb2gray(image);
end
[rows, cols] = size(image);
% Convert into PGM imagefile, readable by "keypoints" executable
f = fopen('tmp.pgm', 'w');
if f == -1
error('Could not create file tmp.pgm.');
end
fprintf(f, 'P5\n%d\n%d\n255\n', cols, rows);
fwrite(f, image', 'uint8');
fclose(f);
% Call keypoints executable
if isunix
command = '!./sift ';
else
command = '!siftWin32 ';
end
command = [command ' <tmp.pgm >tmp.key'];
eval(command);
% Open tmp.key and check its header
g = fopen('tmp.key', 'r');
if g == -1
error('Could not open file tmp.key.');
end
[header, count] = fscanf(g, '%d %d', [1 2]);
if count ~= 2
error('Invalid keypoint file beginning.');
end
num = header(1);
len = header(2);
if len ~= 128
error('Keypoint descriptor length invalid (should be 128).');
end
% Creates the two output matrices (use known size for efficiency)
locs = double(zeros(num, 4));
descriptors = double(zeros(num, 128));
% Parse tmp.key
for i = 1:num
[vector, count] = fscanf(g, '%f %f %f %f', [1 4]); %row col scale ori
if count ~= 4
error('Invalid keypoint file format');
end
locs(i, :) = vector(1, :);
[descrip, count] = fscanf(g, '%d', [1 len]);
if (count ~= 128)
error('Invalid keypoint file value.');
end
% Normalize each input vector to unit length
descrip = descrip / sqrt(sum(descrip.^2));
descriptors(i, :) = descrip(1, :);
end
fclose(g);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
Q3VT.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/siftDemoV4/siftDemoV4/Q3VT.m
| 1,016 |
utf_8
|
6fa94d5010a5e371dfcd7bd64490424c
|
% Q3 - Image Based Location with Vocab Tree
function VT
dataBaseDescriptors = [];
queryDescriptors = [];
dataBaseImgOrder = [];
queryImageOrder = [];
files = dir('D:/Matlab Projects/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/database/*.png');
% Build the Codebase of Descriptors
for file = files'
filePath = strcat('Assignment2_DATA/Assignment2_DATA/database/', + file.name);
dataBaseImgOrder = [dataBaseImgOrder, file.name];
[image, descriptors] = sift(filePath);
dataBaseDescriptors = [dataBaseDescriptors; descriptors];
end
%
[idx,C,sumd,D] = kmeans(dataBaseDescriptors, 1000);
files = dir('D:/Matlab Projects/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/query/*.png');
for file = files'
clusterCount = zeros(1, 1000);
filePath = strcat('Assignment2_DATA/Assignment2_DATA/query/', + file.name);
queryImageOrder = [queryImageOrder, file.name];
[image, descriptors] = sift(filePath);
queryDescriptors = [queryDescriptors; descriptors];
end
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
match.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/siftDemoV4/siftDemoV4/match.m
| 1,940 |
utf_8
|
e876f215400508c0507fd248db781333
|
% num = match(image1, image2)
%
% This function reads two images, finds their SIFT features, and
% displays lines connecting the matched keypoints. A match is accepted
% only if its distance is less than distRatio times the distance to the
% second closest match.
% It returns the number of matches displayed.
%
% Example: match('scene.pgm','book.pgm');
function num = match(image1, image2)
% Find SIFT keypoints for each image
[im1, des1, loc1] = sift(image1);
[im2, des2, loc2] = sift(image2);
% For efficiency in Matlab, it is cheaper to compute dot products between
% unit vectors rather than Euclidean distances. Note that the ratio of
% angles (acos of dot products of unit vectors) is a close approximation
% to the ratio of Euclidean distances for small angles.
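% (For unit descriptor vectors a and b, ||a - b||^2 = 2 - 2*(a'*b), so ranking
% neighbors by acos(a'*b) yields the same ordering as ranking by Euclidean
% distance.)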
%
% distRatio: Only keep matches in which the ratio of vector angles from the
% nearest to second nearest neighbor is less than distRatio.
distRatio = 0.6;
% For each descriptor in the first image, select its match to second image.
des2t = des2'; % Precompute matrix transpose
for i = 1 : size(des1,1)
dotprods = des1(i,:) * des2t; % Computes vector of dot products
[vals,indx] = sort(acos(dotprods)); % Take inverse cosine and sort results
% Check if nearest neighbor has angle less than distRatio times 2nd.
if (vals(1) < distRatio * vals(2))
match(i) = indx(1);
else
match(i) = 0;
end
end
% Create a new image showing the two images side by side.
im3 = appendimages(im1,im2);
% Show a figure with lines joining the accepted matches.
figure('Position', [100 100 size(im3,2) size(im3,1)]);
colormap('gray');
imagesc(im3);
hold on;
cols1 = size(im1,2);
for i = 1: size(des1,1)
if (match(i) > 0)
line([loc1(i,2) loc2(match(i),2)+cols1], ...
[loc1(i,1) loc2(match(i),1)], 'Color', 'c');
end
end
hold off;
num = sum(match > 0);
fprintf('Found %d matches.\n', num);
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
Q3BOW.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/project_2/siftDemoV4/siftDemoV4/Q3BOW.m
| 2,621 |
utf_8
|
349eabe80816699c0b002a45c4951563
|
% Q3 - Image Based Location with Bag of Words
function BOW
dataBaseDescriptors = [];
queryDescriptors = [];
dataBaseImgOrder = [];
queryImageOrder = [];
files = dir('D:/Matlab Projects/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/database/*.png');
% Build the Codebase of Descriptors
for file = files'
filePath = strcat('Assignment2_DATA/Assignment2_DATA/database/', + file.name);
dataBaseImgOrder = [dataBaseImgOrder, file.name];
[image, descriptors] = sift(filePath);
dataBaseDescriptors = [dataBaseDescriptors; descriptors];
end
%
[idx,C,sumd,D] = kmeans(dataBaseDescriptors, 1000);
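% C holds the 1000 k-means centroids that act as the visual-word vocabulary;
% each descriptor below is assigned to the centroid with the largest dot
% product to build a per-image word histogram.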
files = dir('D:/Matlab Projects/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/query/*.png');
clusterQCounts = [];
for file = files'
clusterCount = zeros(1, 1000);
filePath = strcat('Assignment2_DATA/Assignment2_DATA/query/', + file.name);
queryImageOrder = [queryImageOrder, file.name];
[image, descriptors] = sift(filePath);
queryDescriptors = [queryDescriptors; descriptors];
for row = 1:1:size(descriptors, 1)
largestCluster = 0;
largestDotProduct = 0;
for c = 1:1:size(C, 1)
value = dot(descriptors(row, :), C(c, :));
if largestDotProduct < value
largestDotProduct = value;
largestCluster = c;
end
end
clusterCount(largestCluster) = clusterCount(largestCluster) + 1;
end
clusterQCounts = [clusterQCounts; clusterCount];
end
files = dir('D:/Matlab Projects/project_2/Inputs/Assignment2_DATA/Assignment2_DATA/database/*.png');
clusterDCounts = [];
for file = files'
clusterCount = zeros(1, 1000);
filePath = strcat('Assignment2_DATA/Assignment2_DATA/database/', + file.name);
queryImageOrder = [queryImageOrder, file.name];
[image, descriptors] = sift(filePath);
queryDescriptors = [queryDescriptors; descriptors];
for row = 1:1:size(descriptors, 1)
largestCluster = 0;
largestDotProduct = 0;
for c = 1:1:size(C, 1)
value = dot(descriptors(row, :), C(c, :));
if largestDotProduct < value
largestDotProduct = value;
largestCluster = c;
end
end
clusterCount(largestCluster) = clusterCount(largestCluster) + 1;
end
clusterDCounts = [clusterDCounts; clusterCount];
end
|
github
|
RWEISCHEDEL/University-of-Utah-Coursework-master
|
matchExposures.m
|
.m
|
University-of-Utah-Coursework-master/CS 6320 - Computer Vision/Panorama - Final Project/matchExposures.m
| 2,853 |
utf_8
|
ae91ed3665fbf30805c02a26aedd688d
|
function [matchedImage] = matchExposures(images, transforms, performLoop)
numberImages = size(images, 4);
gammaList = ones(numberImages, 1);
for i = 2 : numberImages
gammaList(i) = matchImagePair(images(:, :, :, i - 1), images(:, :, :, i), transforms(:, :, i));
end
if performLoop
logGammaList = log(gammaList);
logGammaList(1) = [];
A = eye(numberImages - 2);
A = [A; -ones(1, numberImages - 2)];
updatedLogGammaList = A \ logGammaList;
updatedLogGammaList = [0; updatedLogGammaList];
finalGammas = exp(updatedLogGammaList);
accGammaList = ones(numberImages, 1);
for i = 2 : numberImages - 1
accGammaList(i) = accGammaList(i - 1) * finalGammas(i);
end
else
accGammaList = ones(numberImages, 1);
for i = 2 : numberImages
accGammaList(i) = accGammaList(i - 1) * gammaList(i);
end
end
matchedImage = zeros(size(images), 'uint8');
for i = 1 : numberImages
matchedImage(:, :, :, i) = gammaCorrection(images(:, :, :, i), accGammaList(i));
end
end
%% Match pairs of images
function [gammaVal] = matchImagePair(image1, image2, transformVal)
numberIterations = 1000;
alphaVal = 1;
sampleRatioVal = 0.01;
outlierThresholdVal = 1.0;
height = size(image1, 1);
width = size(image1, 2);
labImage1 = rgb2lab(image1);
labImage2 = rgb2lab(image2);
k = 1;
numberPixels = numel(image1);
numberSamples = round(numberPixels * sampleRatioVal);
samples = zeros(numberSamples, 2);
while true
pixel2 = [randi([1 height]); randi([1 width]); 1];
pixel1 = transformVal * pixel2;
pixel1 = pixel1 ./ pixel1(3);
if pixel1(1) >= 1 && pixel1(1) < height && pixel1(2) >= 1 && pixel1(2) < width
i = floor(pixel1(2));
a = pixel1(2) - i;
j = floor(pixel1(1));
b = pixel1(1) - j;
sample1 = (1 - a) * (1 - b) * labImage1(j, i, 1) + a * (1 - b) * labImage1(j, i + 1, 1) + a * b * labImage1(j + 1, i + 1, 1) + (1 - a) * b * labImage1(j + 1, i, 1);
sample2 = labImage2(pixel2(1), pixel2(2), 1);
if sample1 > outlierThresholdVal && sample2 > outlierThresholdVal
samples(k, 1) = sample1 / 100;
samples(k, 2) = sample2 / 100;
k = k + 1;
if k > numberSamples
break;
end
end
end
end
gammaVal = 1;
for i = 1 : numberIterations
gammaVal = gammaVal - alphaVal * sum((samples(:, 2) .^ gammaVal - samples(:, 1)) .* log(samples(:, 2)) .* (samples(:, 2) .^ gammaVal)) / numberSamples;
end
end
%% Perform Gamma Correction
function [gammaImage] = gammaCorrection(image, gammaVal)
labImage = rgb2lab(image);
labImage(:, :, 1) = (labImage(:, :, 1) / 100) .^ gammaVal * 100;
gammaImage = lab2rgb(labImage, 'OutputType', 'uint8');
end
|
github
|
albanie/mcnExtraLayers-master
|
setup_mcnExtraLayers.m
|
.m
|
mcnExtraLayers-master/setup_mcnExtraLayers.m
| 1,383 |
utf_8
|
027d96f5ef9ba1d0e9f6b49f6cb1bfe3
|
function setup_mcnExtraLayers
%SETUP_MCNEXTRALAYERS Sets up mcnExtraLayers by adding its folders to the path
% add dependencies
check_dependency('autonn') ;
root = fileparts(mfilename('fullpath')) ;
addpath(root, [root '/matlab'], [root '/matlab/wrappers'], [root '/utils']) ;
% -----------------------------------
function check_dependency(moduleName)
% -----------------------------------
name2path = @(name) strrep(name, '-', '_') ;
setupFunc = ['setup_', name2path(moduleName)] ;
if exist(setupFunc, 'file')
vl_contrib('setup', moduleName) ;
else
% try adding the module to the path
addpath(fullfile(vl_rootnn, 'contrib', moduleName)) ;
if exist(setupFunc, 'file')
vl_contrib('setup', moduleName) ;
else
waiting = true ;
msg = ['module %s was not found on the MATLAB path. Would you like ' ...
'to install it now? (y/n)\n'] ;
prompt = sprintf(msg, moduleName) ;
while waiting
str = input(prompt,'s') ;
switch str
case 'y'
vl_contrib('install', moduleName) ;
vl_contrib('compile', moduleName) ;
vl_contrib('setup', moduleName) ;
return ;
case 'n'
error('the %s module is required but was not installed', moduleName) ;
otherwise
fprintf('input %s not recognised, please use `y` or `n`\n', str) ;
end
end
end
end
|
github
|
albanie/mcnExtraLayers-master
|
findBestEpoch.m
|
.m
|
mcnExtraLayers-master/utils/findBestEpoch.m
| 3,735 |
utf_8
|
b82068b1b1ed40c9b537adbec22bb03a
|
function bestEpoch = findBestEpoch(expDir, varargin)
%FINDBESTEPOCH finds the best epoch of training
% FINDBESTEPOCH(EXPDIR) evaluates the checkpoints
% (the `net-epoch-%d.mat` files created during
% training) in EXPDIR
%
% FINDBESTEPOCH(..., 'option', value, ...) accepts the following
% options:
%
% `priorityMetric`:: 'classError'
% Determines the highest priority metric by which to rank the
% checkpoints.
%
% `prune`:: false
% Removes all saved checkpoints to save space except:
%
% 1. The checkpoint with the lowest validation error metric
% 2. The last checkpoint
%
% Copyright (C) 2017 Samuel Albanie
% Licensed under The MIT License [see LICENSE.md for details]
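%
% Usage sketch (illustrative only; the experiment directory is hypothetical):
%   bestEpoch = findBestEpoch('data/exp1', 'priorityMetric', 'mAP', 'prune', false) ;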
opts.prune = false ;
opts.priorityMetric = 'classError' ;
opts = vl_argparse(opts, varargin) ;
lastEpoch = findLastCheckpoint(expDir);
if ~lastEpoch, return ; end % return if no checkpoints were found
bestEpoch = findBestValCheckpoint(expDir, opts.priorityMetric);
if opts.prune
preciousEpochs = [bestEpoch lastEpoch];
removeOtherCheckpoints(expDir, preciousEpochs);
fprintf('----------------------- \n');
fprintf('directory cleaned: %s\n', expDir);
fprintf('----------------------- \n');
end
% -------------------------------------------------------------------------
function removeOtherCheckpoints(expDir, preciousEpochs)
% -------------------------------------------------------------------------
list = dir(fullfile(expDir, 'net-epoch-*.mat')) ;
tokens = regexp({list.name}, 'net-epoch-([\d]+).mat', 'tokens') ;
epochs = cellfun(@(x) sscanf(x{1}{1}, '%d'), tokens) ;
targets = ~ismember(epochs, preciousEpochs);
files = cellfun(@(x) fullfile(expDir, sprintf('net-epoch-%d.mat', x)), ...
num2cell(epochs(targets)), 'UniformOutput', false);
cellfun(@(x) delete(x), files)
% -------------------------------------------------------------------------
function bestEpoch = findBestValCheckpoint(expDir, priorityMetric)
% -------------------------------------------------------------------------
lastEpoch = findLastCheckpoint(expDir) ;
if strcmp(priorityMetric, 'last'), bestEpoch = lastEpoch ; return ; end
% handle the different storage structures/error metrics
path = fullfile(expDir, sprintf('net-epoch-%d.mat', lastEpoch)) ;
try
data = load(path) ;
catch
msg = 'checkpoint at %s was malformed, trying again in 10 secs...\n' ;
warning(msg, path) ; pause(10) ; data = load(path) ;
end
if isfield(data, 'stats')
valStats = data.stats.val;
elseif isfield(data, 'info')
valStats = data.info.val;
elseif isfield(data, 'state')
valStats = data.state.stats.val ;
else
error('storage structure not recognised');
end
ascending = {'mAP', 'accuracy'} ;
descending = {'top1error', 'error', 'mbox_loss', 'class_loss'} ;
% find best checkpoint according to the following priority
metrics = [{priorityMetric} ascending descending] ;
for i = 1:numel(metrics)
if isfield(valStats, metrics{i})
errorMetric = [valStats.(metrics{i})] ;
selectedMetric = metrics{i} ;
break ;
end
end
assert(logical(exist('errorMetric', 'var')), 'error metrics not recognized') ;
if ismember(selectedMetric, ascending)
pick = @max ;
else
pick = @min ;
end
[~, bestEpoch] = pick(errorMetric);
% -------------------------------------------------------------------------
function epoch = findLastCheckpoint(expDir)
% -------------------------------------------------------------------------
list = dir(fullfile(expDir, 'net-epoch-*.mat')) ;
tokens = regexp({list.name}, 'net-epoch-([\d]+).mat', 'tokens') ;
epoch = cellfun(@(x) sscanf(x{1}{1}, '%d'), tokens) ;
epoch = max([epoch 0]) ;
|
github
|
albanie/mcnExtraLayers-master
|
checkLearningParams.m
|
.m
|
mcnExtraLayers-master/utils/checkLearningParams.m
| 9,611 |
utf_8
|
0dea868bbdec5be853e0fb633f4309ff
|
function checkLearningParams(mcn_outs, opts)
%CHECKLEARNINGPARAMS compares parameters against caffe.
% Algo: we first parse the prototxt and build a set of basic "layer"
% objects to store parameters. These can then be directly compared against
% their mcn equivalents to reduce the risk of incorrect initialisation.
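%
% Usage sketch (illustrative only; the network handle and prototxt path are
% hypothetical):
%   opts.modelOpts.protoPath = 'models/train.prototxt' ;
%   checkLearningParams({net}, opts) ;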
caffeLayers = parseCaffeLayers(opts) ;
% loop over layers and check against network
for ii = 1:numel(caffeLayers)
layer = caffeLayers{ii} ;
msg = 'checking layer settings (%d/%d): %s\n' ;
fprintf(msg, ii, numel(caffeLayers), layer.name) ;
ignoreTypes = {'ReLU', 'Scale', 'Silence', 'Eltwise', 'Accuracy', ...
'BatchNorm', 'ImageData'} ;
ignoreNames = {'input-data', 'AnchorTargetLayer', 'rpn-data', ...
'roi-data', 'Annotation'} ;
if ismember(layer.type, ignoreTypes), continue ; end
if ismember(layer.name, ignoreNames), continue ; end
mcnLayerName = layer.name ;
found = false ;
if contains(layer.name, '-')
mcnLayerName = strrep(mcnLayerName, '-', '_') ;
fprintf('renaming search layer %s to %s\n', layer.name, mcnLayerName) ;
end
for jj = 1:numel(mcn_outs)
mcnLayer = mcn_outs{jj}.find(mcnLayerName) ;
if ~isempty(mcnLayer), mcn = mcnLayer{1} ; found = true ; break ; end
end
assert(found, 'matching layer not found') ;
switch layer.type
case 'Convolution'
checkFields = {'stride', 'pad', 'dilate', 'out', 'kernel_size', ...
'lr_mult', 'decay_mult'} ;
hasBias = isfield(layer, 'lr_multx') ;
mcnFilters = mcn.inputs{2} ; % assume square filters
msg = 'code must be modified to handle non-square filter checks' ;
assert(size(mcnFilters.value,1) == size(mcnFilters.value,2), msg) ;
filterOpts = {'kernel_size', size(mcnFilters.value, 1), ...
'out', size(mcnFilters.value, 4), ...
'lr_mult', mcnFilters.learningRate, ...
'decay_mult', mcnFilters.weightDecay} ;
mcnArgs = [ mcn.inputs filterOpts ] ;
if hasBias
mcnBias = mcnArgs{3} ;
biasOpts = {'lr_multx', mcnBias.learningRate, ...
'decay_multx', mcnBias.weightDecay} ;
mcnArgs = [ mcnArgs biasOpts ] ; %#ok
checkFields = [checkFields biasOpts([1 3])] ; %#ok
end
mcnArgs(strcmp(mcnArgs, 'CuDNN')) = [] ;
% extract params, fill in defaults and convert to canonical shape
caffe.stride = fetch(layer, 'stride', [1 2], [1 1]) ;
caffe.pad = fetch(layer, 'pad', [1 4], [0 0 0 0]) ;
caffe.out = fetch(layer, 'num_output', 1, 1) ;
caffe.dilate = fetch(layer, 'dilation', [1 2], [1 1]) ;
caffe.kernel_size = fetch(layer, 'kernel_size', [1 2], [1 1]) ;
caffe.decay_mult = fetch(layer, 'decay_mult', 1, 1) ;
caffe.lr_mult = fetch(layer, 'lr_mult', 1, 1) ;
if hasBias
caffe.lr_multx = fetch(layer, 'lr_multx', 1, 2) ;
caffe.decay_multx = fetch(layer, 'decay_multx', 1, 0) ;
end
case 'BatchNorm'
checkFields = {'lr_mult', 'lr_multx', 'lr_multxx', ...
'decay_mult', 'decay_multx', 'decay_multxx'} ;
mcnMult = mcn.inputs{2} ; mcnBias = mcn.inputs{3} ;
mcnMoments = mcn.inputs{4} ;
mcnArgs = {'lr_mult', mcnMult.learningRate, ...
'decay_mult', mcnMult.weightDecay, ...
'lr_multx', mcnBias.learningRate, ...
'decay_multx', mcnBias.weightDecay, ...
'lr_multxx', mcnMoments.learningRate, ...
'decay_multxx', mcnMoments.weightDecay} ;
for jj = 1:numel(checkFields)
caffe.(checkFields{jj}) = str2double(layer.(checkFields{jj})) ;
end
case 'Pooling'
checkFields = {'stride', 'pad', 'method', 'kernel_size'} ;
caffe.kernel_size = fetch(layer, 'kernel_size', [1 2], [1 1]) ;
caffe.stride = fetch(layer, 'stride', [1 2], [1 1]) ;
caffe.pad = fetch(layer, 'pad', [1 4], [0 0 0 0]) ;
% different conventions: mcn `avg` == caffe `ave` (both use
% `max` for max pooling)
caffe.method = strrep(lower(layer.pool), 'ave', 'avg') ;
poolOpts = mcn.inputs(3:end) ;
poolOpts(strcmp(poolOpts, 'CuDNN')) = [] ;
mcnArgs = [poolOpts {'kernel_size', mcn.inputs{2}}] ;
otherwise, fprintf('checking layer type: %s\n', layer.type) ;
end
% run checks
for jj = 1:numel(checkFields)
fieldName = checkFields{jj} ;
mcnPos = find(strcmp(mcnArgs, fieldName)) + 1 ;
value = mcnArgs{mcnPos} ; cValue = caffe.(fieldName) ;
if strcmp(fieldName, 'pad')
% since mcn and caffe handle padding slightly differently, produce a
% warning rather than an error for different padding settings
if ~all(value == cValue)
msg = 'WARNING:: pad values do not match for %s: %s vs %s\n' ;
fprintf(msg, layer.name, mat2str(value), mat2str(cValue)) ;
end
else
msg = sprintf('unmatched parameters for %s', fieldName) ;
assert(all(value == cValue), msg) ;
end
end
end
% ---------------------------------------------
function x = fetch(layer, name, shape, default)
% ---------------------------------------------
if isfield(layer, name)
x = str2double(layer.(name)) ;
if numel(x) == 1, x = repmat(x, shape) ; end
else
x = default ;
end
% --------------------------------------
function layers = parseCaffeLayers(opts)
% --------------------------------------
% create name map
nameMap = containers.Map ;
nameMap('rpn_conv/3x3') = 'rpn_conv_3x3' ;
nameMap('psroipooled_loc_rois') = 'psroipooled_bbox_rois' ;
nameMap('loss') = 'loss_cls' ; % maintain mcn consistency
proto = fileread(opts.modelOpts.protoPath) ;
% mini parser
stack = {} ; tokens = strsplit(proto, '\n') ;
known = {'ResNet-50', 'ResNet50_BN_SCALE_Merge', ...
'VGG_ILSVRC_16_layers', 'SEC'} ;
msg = 'wrong proto' ;
assert(contains(tokens{1}, known), msg) ; tokens(1) = [] ;
layers = {} ; layer = struct() ;
while ~isempty(tokens)
head = tokens{1} ; tokens(1) = [] ; clean = cleanStr(head) ;
if isempty(clean) || strcmp(clean(1), '#')
% comment or blank proto line (do nothing)
elseif contains(head, '}') && contains(head, '{')
% NOTE: it's not always necessary to flatten out subfields
pair = strsplit(head, '{') ; key = cleanStr(pair{1}) ;
value = strjoin(pair(2:end), '{') ;
ptr = numel(value) - strfind(fliplr(value), '}') ;
value = value(1:ptr) ;
ignore = {'reshape_param'} ; % caffe and mcn use different values
examine = {'param', 'weight_filler', 'bias_filler', 'smooth_l1_loss_param'} ;
switch key
case ignore, continue ;
case examine, pairs = parseInlinePairs(value) ;
otherwise, error('nested key %s not recognised', key) ;
end
for jj = 1:numel(pairs)
pair = strsplit(pairs{jj}, ':') ;
layer = put(layer, cleanStr(pair{1}), cleanStr(pair{2})) ;
end
elseif contains(head, '}'), stack(end) = [] ;
elseif contains(head, '{'), stack{end+1} = head ; %#ok
else % handle some messy cases
tuple = strsplit(head, ':') ;
if numel(tuple) > 2
if strcmp(cleanStr(tuple{1}), 'param_str')
if numel(tuple) == 3
% standard param_str spec form. E.g.
% param_str: "'feat_stride': 16"
tuple(1) = [] ; % pop param_str specifier
else, keyboard
end
elseif numel(tuple) == 4
pairs = parseInlinePairs(head) ;
for jj = 1:numel(pairs)
pair = strsplit(pairs{jj}, ':') ;
layer = put(layer, cleanStr(pair{1}), cleanStr(pair{2})) ;
end
else, keyboard ;
end
end
key = cleanStr(tuple{1}) ; value = cleanStr(tuple{2}) ;
%if contains(head, 'rpn_conv/3x3'), keyboard ; end
if isKey(nameMap, value), value = nameMap(value) ; end
layer = put(layer, key, value) ;
end
if isempty(stack) && ~isempty(layer)
layers{end+1} = layer ; layer = {} ; %#ok
end
end
% -------------------------------------
function layer = put(layer, key, value)
% -------------------------------------
% store key-value pairs in layer without overwriting previous
% values. Due to MATLAB key naming restrictions, an x-suffix count is used
% for indexing
while isfield(layer, key), key = sprintf('%sx', key) ; end
layer.(key) = value ;
% ------------------------------------
function pairs = parseInlinePairs(str)
% ------------------------------------
% PARSEINLINEPAIRS parses prototxt strings in which key-value pairs
% are supplied in a line, delimited only by whitespace. For example:
% kernel_size: 3 pad: 1 stride: 1
str = strtrim(str) ; % remove leading/trailing whitespace
dividers = strfind(str, ' ') ;
assert(mod(numel(dividers),2) == 1, 'expected odd number of dividers') ;
starts = [1 dividers(2:2:end)+1] ;
ends = [dividers(2:2:end)-1 numel(str)] ;
pairs = arrayfun(@(s,e) {str(s:e)}, starts, ends) ;
% --------------------------
function str = cleanStr(str)
% --------------------------
% prune unused space and punctuation from prototxt files
% clean up
str = strrep(strrep(strrep(str, '"', ''), ' ', ''), '''', '') ;
% stop at comments
comment = strfind(str, '#') ;
if ~isempty(comment)
str = str(1:comment(1)-1) ; % stop at first #
end
|
github
|
albanie/mcnExtraLayers-master
|
vl_nnaugdata.m
|
.m
|
mcnExtraLayers-master/matlab/vl_nnaugdata.m
| 3,294 |
utf_8
|
701424346f4149e883a403a5d675fc60
|
function y = vl_nnaugdata(x, varargin)
% VL_NNAUGDATA data augmentation for visual data
% Y = VL_NNAUGDATA(X) randomly applies a set of data augmentation
% transformations to the HxWxCxN input tensor X to produce an
% augmented version of the data Y (of the same shape as X).
%
% VL_NNAUGDATA(..., 'option', value, ...) takes the following options:
%
% `rotateLims`:: [-pi/8, pi/8]
% Uniformly samples rotation angles (in radians) from the given range and
% applies them to each batch element of the input.
%
% `zoomLims`:: [0.9, 1.1]
%  Uniformly samples zoom factors from the given range and applies them to
%  each batch element of the input.
%
% `skewLims`:: [-0.1, 0.1]
%  Uniformly samples x and y skew-factors from the given range and applies
% them to each batch element of the input.
%
% `randTranslation` :: true
% If true, randomly samples an (x,y) offset for each batch element of the
% input, taking account of the zoomScale that was applied.
%
% Copyright (C) 2018 Samuel Albanie
% All rights reserved.
opts.rotateLims = [-pi/8, pi/8] ;
opts.zoomLims = [0.9, 1.1] ;
opts.skewLims = [-0.1, 0.1] ;
opts.randTranslation = true ;
[opts, dzdy] = vl_argparsepos(opts, varargin) ;
assert(isempty(dzdy), 'vl_nnaugdata does not currently support backprop') ;
augs = computeAugs(size(x, 4), opts) ;  % one 3x3 affine matrix per batch element
y = x ;  % placeholder: applying the sampled affines to X is not implemented in this snippet
% --------------------------------------------------------------------
function affs = computeAugs(batchSize, opts)
% --------------------------------------------------------------------
% Training time augmentations
ratio = 1/25 ;
affs = repmat(eye(3,3), 1, 1, batchSize) ;
maxOffset = round(ratio * 224) ; % based on Zhiding Yu's paper
minXY = randi(maxOffset, batchSize, 2) ;
zoomSc = (1 - ratio) + (ratio*2) * rand(1, batchSize) ;
zAffs = arrayfun(@(x) {zoomOut(zoomSc(x), minXY(x,:))}, 1:batchSize) ;
zAffs = cat(3, zAffs{:}) ;
vals = [-pi/18 0 pi/18] ;
thetas = randi(3, batchSize, 1) ;
rAffs = arrayfun(@(x) {rotate(vals(thetas(x)))}, 1:batchSize) ;
rAffs = cat(3, rAffs{:}) ;
vals = [-0.1 0 0.1] ;
skews = randi(3, batchSize, 2) ;
sAffs = arrayfun(@(x) {skew(vals(skews(x,1)), vals(skews(x,2)))}, 1:batchSize) ;
sAffs = cat(3, sAffs{:}) ;
for ii = 1:batchSize
affs(:,:,ii) = zAffs(:,:,ii) * rAffs(:,:,ii) * sAffs(:,:,ii) ;
end
% only augment 50% of time
drop = find(rand(1, batchSize) > 0.5) ;
for ii = 1:numel(drop)
affs(:,:,drop(ii)) = eye(3,3) ;
end
% --------------------------------------------------------
function aff = zoomOut(zoomScale, minYX)
% --------------------------------------------------------
zs = (zoomScale - 1) / zoomScale ;
tx = zs - 2 * zs * minYX(2) ;
ty = zs - 2 * zs * minYX(1) ;
aff = [ 1 0 tx ; % compute the affine matrix
0 1 ty ;
0 0 1] * zoomScale ;
% --------------------------------------------------------
function aff = rotate(theta)
% --------------------------------------------------------
aff = [ cos(theta) -sin(theta) 0 ; % compute the affine matrix
sin(theta) cos(theta) 0 ;
0 0 1] ;
% --------------------------------------------------------
function aff = skew(s1, s2)
% --------------------------------------------------------
aff = [ 1 s1 0 ; % compute the affine matrix
s2 1 0 ;
0 0 1] ;
|
github
|
albanie/mcnExtraLayers-master
|
vl_nnnonorm.m
|
.m
|
mcnExtraLayers-master/matlab/vl_nnnonorm.m
| 1,345 |
utf_8
|
807c6f7dfff7d9811abb625348f1ea26
|
function [y, dzdg, dzdb] = vl_nnnonorm(x, g, b, varargin)
%VL_NNNONORM applies weights and biases, but does no normalization
% Y = VL_NNNONORM(X,G,B) applies a set of gains and biases to
% the input X with shape HxWxCxN. "No normalization" is defined as:
%
%   Y(i,j,k,t) = G(k) * X(i,j,k,t) + B(k)
%
% This layer was largely inspired by this blog post:
% http://www.offconvex.org/2018/03/02/acceleration-overparameterization/
%
% Copyright (C) 2018 Samuel Albanie
% All rights reserved.
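%
% Usage sketch (illustrative only; shapes follow the description above):
%   x = randn(7, 7, 64, 8, 'single') ;   % HxWxCxN input
%   g = ones(1, 1, 64, 'single') ;       % per-channel gains
%   b = zeros(1, 1, 64, 'single') ;      % per-channel biases
%   y = vl_nnnonorm(x, g, b) ;           % forward pass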
[~,dzdy] = vl_argparsepos(struct(), varargin) ;
expectedSz = [1 1 size(x,3) 1] ;
sg = size(g) ; sb = size(b) ;
assert(all(expectedSz(1:numel(sg)) == sg), 'GAINS have unexpected size') ;
assert(all(expectedSz(1:numel(sb)) == sb), 'BIASES have unexpected size') ;
if isempty(dzdy)
y = bsxfun(@times, g, x) ; % apply gain
y = bsxfun(@plus, y, b) ; % add bias
else
dzdy = dzdy{1} ;
dzdb = chanSum(dzdy) ;
dzdg = chanSum(x .* dzdy) ;
dzdx = bsxfun(@times, dzdy, g) ;
y = dzdx ;
end
% -----------------------
function res = chanSum(x)
% -----------------------
res = sum(sum(sum(x, 1), 2), 4) ;
|
github
|
albanie/mcnExtraLayers-master
|
vl_nngnorm.m
|
.m
|
mcnExtraLayers-master/matlab/vl_nngnorm.m
| 3,180 |
utf_8
|
823c3574250346a697bc9a4f1c6de84d
|
function [y, dzdg, dzdb] = vl_nngnorm(x, g, b, varargin)
%VL_NNGNORM CNN group normalization.
% Y = VL_NNGNORM(X,G,B) applies group normalization
% to the input X with shape HxWxCxN. Group normalization is defined as:
%
% Y(i,j,k,t) = G(k',t) * X_HAT(i,j,k,t) + B(k',t)
%
% where
% k' = group_idx(k,C,G), where N_G is the number of groups and
% group_idx(k,C,G) := floor(k / (C/N_G)).
%   X_HAT(i,j,k,t) = (X(i,j,k,t) - mu(k',t)) / sigma(k',t)
% mu(k',t) = mean_ijk'' X(i,j,k'',t),
% sigma2(k',t) = mean_ijk'' (X(i,j,k'',t) - mu(k'',t))^2,
%   sigma(k',t) = sqrt(sigma2(k',t) + EPSILON)
% where k'' takes values such that group_idx(k'',C,G) == group_idx(k,C,G)
%
% VL_NNGNORM(..., 'option', value, ...) takes the following option:
%
% `numGroups`:: 32
% The number of groups used to split the channels when computing
% normalization statistics.
%
% `epsilon`:: 1e-4
% A parameter to add stability to the normalization operation.
%
% Notes: GroupNorm is introduced in the paper:
% `Group Normalization, Yuxin Wu, Kaiming He,
% arXiv preprint arXiv:1803.08494 (2018)
%
% Copyright (C) 2018 Samuel Albanie
% All rights reserved.
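%
% Usage sketch (illustrative only; shapes follow the description above):
%   x = randn(7, 7, 64, 8, 'single') ;          % HxWxCxN input
%   g = ones(1, 1, 64, 'single') ;              % per-channel gains
%   b = zeros(1, 1, 64, 'single') ;             % per-channel biases
%   y = vl_nngnorm(x, g, b, 'numGroups', 32) ;  % forward pass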
opts.numGroups = 32 ;
opts.epsilon = 1e-4 ;
[opts,dzdy] = vl_argparsepos(opts, varargin) ;
bsize = size(x, 4) ;
expectedSz = [1 1 size(x,3) 1] ;
sg = size(g) ; sb = size(b) ;
assert(all(expectedSz(1:numel(sg)) == sg), 'GAINS have unexpected size') ;
assert(all(expectedSz(1:numel(sb)) == sb), 'BIASES have unexpected size') ;
szX = size(x) ; % store original shape
% compute statistics per group for current minibatch and normalize
x = reshape(x, size(x,1), size(x,2), [], opts.numGroups, bsize) ;
mu = groupAvg(x) ;
sigma2 = groupAvg(bsxfun(@minus, x, mu).^ 2) ;
sigma = sqrt(sigma2 + opts.epsilon) ;
x_hat = bsxfun(@rdivide, bsxfun(@minus, x, mu), sigma) ;
if isempty(dzdy)
x_hat_ = reshape(x_hat, szX) ;
y = bsxfun(@times, g, x_hat_) ; % apply gain
y = bsxfun(@plus, y, b) ; % add bias
else
dzdy = dzdy{1} ;
dzdb = chanSum(dzdy) ;
x_hat_ = reshape(x_hat, szX) ; dzdg = chanSum(x_hat_ .* dzdy) ;
dzdy = reshape(dzdy, size(x,1), size(x,2), [], opts.numGroups, bsize) ;
g_ = reshape(g, 1, 1, size(dzdy, 3), []) ;
dzdx_hat = bsxfun(@times, dzdy, g_) ;
t1 = bsxfun(@minus, x, mu) ;
m = prod([size(x,1) size(x,2) size(x,3)]) ;
dzdsigma = groupSum((-1/2) * dzdx_hat .* bsxfun(@rdivide, t1, sigma.^3)) ;
dzdmu = groupSum(bsxfun(@rdivide, dzdx_hat, -sigma)) + ...
bsxfun(@times, dzdsigma, -2 * groupAvg(t1)) ;
t4 = bsxfun(@rdivide, dzdx_hat, sigma) + ...
bsxfun(@times, dzdsigma, (2 / m) * t1) ;
dzdx = bsxfun(@plus, t4, dzdmu * (1/m)) ;
y = reshape(dzdx, szX) ;
end
% ----------------------------------------
function avg = groupAvg(x)
% ----------------------------------------
avg = mean(mean(mean(x, 1), 2), 3) ;
% -----------------------
function res = groupSum(x)
% -----------------------
res = sum(sum(sum(x, 1), 2), 3) ;
% -----------------------
function res = chanSum(x)
% -----------------------
res = sum(sum(sum(x, 1), 2), 4) ;
|
github
|
albanie/mcnExtraLayers-master
|
vl_nnbrenorm.m
|
.m
|
mcnExtraLayers-master/matlab/vl_nnbrenorm.m
| 3,216 |
utf_8
|
5fdea2ededecb39d822efa787e95fe7c
|
function [y, dzdg, dzdb, m] = vl_nnbrenorm(x, g, b, m, clips, test, varargin)
%VL_NNBRENORM CNN batch renormalisation.
% Y = VL_NNBRENORM(X,G,B,M,CLIPS,TEST) applies batch renormalization
% to the input X. Batch renormalization is defined as:
%
% Y(i,j,k,t) = G(k) * X_HAT(i,j,k,t) + B(k)
%
% where
%   X_HAT(i,j,k,t) = R(k) * (X(i,j,k,t) - mu(k)) / sigma(k) + D(k)
% mu(k) = mean_ijt X(i,j,k,t),
% sigma2(k) = mean_ijt (X(i,j,k,t) - mu(k))^2,
% sigma(k) = sqrt(sigma2(k) + EPSILON)
% R(k) = cutoff(sigma(k) / M(2,k)), [1/rMax, rMax])
% D(k) = cutoff((mu(k) - M(1,k))/ M(2,k)), [-dMax, dMax])
% rMax = clips(1)
% dMax = clips(2)
%
% and we define cutoff(x, [a b]) to be the operation that clips the value
% of x to lie inside the range [a b]. The parameters G(k) and B(k) are
% multiplicative and additive constants used to scale each data channel, M
% is the Cx2 array of moments used to track the batch mean and variance.
% R(k) and D(k) are used to balance the current estimate of feature
% means and variances between the statistics gathered from the current
% minibatch, and rolling averages over previous minibatches, as discussed
% in the paper:
%
% `Batch Renormalization: Towards Reducing Minibatch Dependence in
% Batch-Normalized Models` by Sergey Ioffe, 2017
%
% Copyright (C) 2017 Samuel Albanie
% All rights reserved.
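%
% Usage sketch (illustrative only; shapes follow the description above and the
% clip values are assumptions):
%   x = randn(7, 7, 64, 8, 'single') ;             % HxWxCxN input
%   g = ones(1, 1, 64, 'single') ;                 % per-channel gains
%   b = zeros(1, 1, 64, 'single') ;                % per-channel biases
%   m = [zeros(64, 1) ones(64, 1)] ;               % rolling [mean sigma] per channel
%   y = vl_nnbrenorm(x, g, b, m, [3 5], false) ;   % training-mode forward pass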
[~, dzdy] = vl_argparsepos(struct(), varargin) ;
% unpack parameters
epsilon = 1e-4 ; rMax = clips(1) ; dMax = clips(2) ;
rolling_mu = permute(m(:,1), [3 2 1]) ;
rolling_sigma = permute(m(:,2), [3 2 1]) ;
if ~test
% first compute statistics per channel for current minibatch and normalize
mu = chanAvg(x) ; sigma2 = chanAvg(bsxfun(@minus, x, mu).^ 2) ;
sigma = sqrt(sigma2 + epsilon) ;
x_hat_ = bsxfun(@rdivide, bsxfun(@minus, x, mu), sigma) ;
% then "renormalize"
r = bsxfun(@min, bsxfun(@max, sigma ./ rolling_sigma, 1 / rMax), rMax) ;
d = bsxfun(@min, bsxfun(@max, (mu - rolling_mu)./rolling_sigma,-dMax), dMax) ;
x_hat = bsxfun(@plus, bsxfun(@times, x_hat_, r), d) ;
else
x_hat = bsxfun(@rdivide, bsxfun(@minus, x, rolling_mu), rolling_sigma) ;
end
if isempty(dzdy)
res = bsxfun(@times, g, x_hat) ; % apply gain
y = bsxfun(@plus, res, b) ; % add bias
else
% precompute some common terms
t1 = bsxfun(@minus, x, mu) ;
t2 = bsxfun(@rdivide, r, sigma) ;
t3 = bsxfun(@rdivide, r, sigma2) ;
sz = size(x) ; m = prod([sz(1:2) size(x,4)]) ;
dzdy = dzdy{1} ; dzdx_hat = bsxfun(@times, dzdy, g) ;
dzdsigma = chanSum(dzdx_hat .* bsxfun(@times, t1, -t3)) ;
dzdmu = chanSum(bsxfun(@times, dzdx_hat, -t2)) ;
t4 = bsxfun(@times, dzdx_hat, t2) + ...
bsxfun(@times, dzdsigma, bsxfun(@rdivide, t1, m * sigma)) ;
dzdx = bsxfun(@plus, t4, dzdmu * (1/m)) ; y = dzdx ;
dzdg = chanSum(x_hat .* dzdy) ; dzdb = chanSum(dzdy) ;
m = horzcat(squeeze(mu), squeeze(sigma)) ;
end
% -----------------------
function avg = chanAvg(x)
% -----------------------
avg = mean(mean(mean(x, 1), 2), 4) ;
% -----------------------
function res = chanSum(x)
% -----------------------
res = sum(sum(sum(x, 1), 2), 4) ;
|
github
|
albanie/mcnExtraLayers-master
|
vl_nnbrenorm_wrapper.m
|
.m
|
mcnExtraLayers-master/matlab/wrappers/vl_nnbrenorm_wrapper.m
| 1,743 |
utf_8
|
cff657bd6caed20fcb40d29a091db700
|
function [y, dzdg, dzdb, moments] = vl_nnbrenorm_wrapper(x, g, b, ...
moments, clips, test, varargin)
%VL_NNBRENORM_WRAPPER AutoNN wrapper for MatConvNet's vl_nnbrenorm
% VL_NNBRENORM has a non-standard interface (returns a derivative for the
% moments, even though they are not an input), so we must wrap it.
% Layer.vl_nnbrenorm replaces a standard VL_NNBRENORM call with this one.
%
% Copyright (C) 2017 Samuel Albanie
% (based on the autonn batchnorm wrapper by Joao F. Henriques)
% All rights reserved.
[opts, dzdy] = vl_argparsepos(struct(), varargin) ;
if isscalar(g)
g(1,1,1:size(x,3)) = g ;
end
if isscalar(b)
b(1,1,1:size(x,3)) = b ;
end
if isscalar(moments)
moments(1:size(x,3),1:2) = moments ;
end
if isempty(dzdy)
y = vl_nnbrenorm(x, g, b, moments, clips, test, varargin{:}) ;
else
[y, dzdg, dzdb, moments] = vl_nnbrenorm(x, g, b, moments, clips, ...
test, dzdy{1}, varargin{2:end}) ;
if usingDeprecatedLossFn
moments = moments * size(x, 4) ;
end
end
% ---------------------------------------------------------------------
function old = usingDeprecatedLossFn()
% ---------------------------------------------------------------------
% stolen from Joao Henriques (autonn compatibility code)
% this is needed to harmonize the behavior of two versions of vl_nnloss:
% the legacy behavior which *sums* the loss over the batch, and the new
% behavior that takes the *average* over the batch.
% first, detect if the new behavior ('normalise' option) is present.
old = false ;
try
vl_nnloss([], [], 'normalise', true) ;
catch % unrecognized option, must be the old vl_nnloss
old = true ;
end
|
github
|
albanie/mcnExtraLayers-master
|
nnslice.m
|
.m
|
mcnExtraLayers-master/matlab/xtest/suite/nnslice.m
| 881 |
utf_8
|
9094327e26df505701c1a9362932ec3c
|
classdef nnslice < nntest
methods (Test)
function basic(test)
sz = [3,3,5,4] ;
x = test.randn(sz) ;
dim = 4 ;
slicePoints = 1:dim - 1 ; % slice along fourth dim
y = vl_nnslice(x, dim, slicePoints, []) ;
% check derivatives with numerical approximation
dzdy = cellfun(@(x) test.randn(size(x)), y, 'Uni', 0) ;
dzdx = vl_nnslice(x, dim, slicePoints, dzdy, 'inputSizes', {sz}) ;
dzdy_ = cat(dim, dzdy{:}) ;
dzdx_ = dzdx{1} ;
test.der(@(x) forward_wrapper(x, dim, slicePoints), x, dzdy_, dzdx_, 1e-3*test.range) ;
end
end
end
% -----------------------------------------------------------------
function y = forward_wrapper(x, dim, slicePoints)
% -----------------------------------------------------------------
y = vl_nnslice(x, dim, slicePoints, []) ;
y = cat(dim, y{:}) ;
end
|
github
|
albanie/mcnExtraLayers-master
|
nntukeyloss.m
|
.m
|
mcnExtraLayers-master/matlab/xtest/suite/nntukeyloss.m
| 1,350 |
utf_8
|
977e5438454bf3ef4c0eb49b95fa5ec3
|
classdef nntukeyloss < nntest
methods (Test)
function basic(test)
% We have to be a little bit devious when constructing the
% numerical check - if computing
% x(i) + delta
% changes the value of the median of the residuals, the MAD value
% will also change and there will appear to be a discontinuity
% Therefore, we only run derivative checks on part of the input
% In particular, we deliberately run checks on a portion
% the inputs (which should not trigger the change in median)
n = 50 ;
safety = 5 ;
m = n * safety ;
% create extra entries to safeguard the median
xSource = sort(test.randn([m 1]) / 1) ;
x = xSource(1:n) ;
xPad = xSource(n+1:end) ;
fullX = [x ; xPad] ;
t = sort(test.randn([m 1]) / 1) ;
y = vl_nntukeyloss(fullX, t) ;
% check derivatives with numerical approximation
dzdy = test.randn(size(y)) ;
dzdx = vl_nntukeyloss(fullX, t, dzdy) ;
% restrict to test range
dzdx = dzdx(1:numel(x)) ;
test.der(@(x) splitInputsTukey(x, t, xPad), x, dzdy, dzdx, 1e-3*test.range) ;
end
end
end
% ---------------------------------------
function y = splitInputsTukey(x, t, xPad)
% ---------------------------------------
fullX = [x ; xPad] ;
y = vl_nntukeyloss(fullX, t) ;
end
|
github
|
g4idrijs/ultrasoundsim-master
|
off_axis_demo.m
|
.m
|
ultrasoundsim-master/demos/off_axis_demo.m
| 2,827 |
utf_8
|
5c1318c6b824864b8e1d0cbdc2bb87aa
|
% Demo of simulating off axis.
function [pw] = simulate_off_axis()
% Structured as a function so that we can write helper functions in the
% same file.
% Setup the transducer array.
width = 5e-5;
height = 5e-5;
elements_x = 800;
elements_y = 1;
kerf = 5e-5;
r_curv = 6e-2;
transducer_array = create_rect_csa(...
elements_x, elements_y, width, height, kerf, kerf, r_curv);
% figure();
% draw_array(transducer_array);
% Set up the media. By default we'll use water.
define_media();
%%% Spot 1
focus_x = 1.75e-2;
focus_y = 0;
focus_z = 0.75e-2;
freq = 4e6;
target_1_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, freq, 200);
target_1_array = target_1_array(600:700);
pw1 = calc_pw(target_1_array, freq);
%%% Spot 2
focus_x = 0;
focus_y = 0;
focus_z = 0.75e-2;
freq = 4e6;
target_2_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, freq, 200);
target_2_array = target_2_array(350:450);
pw2 = calc_pw(target_2_array, freq);
%%% Trick to highlight the whole array.
focus_x = 0e-2;
focus_y = 0;
focus_z = 0;
freq = 5e7;
transducer_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, freq, 200);
% pw3 = calc_pw(transducer_array, freq);
% Add the pressure waves and plot.
% pw = pw1 + pw2 + pw3;
pw = pw1 + pw2;
% pw = pw1;
plot_pw(pw)
end
function [x, y, z, coord_grid] = get_x_y_z_coord_grid()
% Helper to get coordinates.
define_media();
% Set up the viewport and resolution.
xmin = -4e-2;
xmax = 4e-2;
ymin = 0;
ymax = 0;
zmin = -0.5e-2;
zmax = 2.5e-2;
xpoints = 500;
ypoints = 1;
zpoints = 500;
dx = (xmax-xmin)/xpoints;
dy = (ymax-ymin)/ypoints;
dz = (zmax-zmin)/zpoints;
delta = [dx dy dz];
x = xmin:dx:xmax;
y = ymin:dy:ymax;
z = zmin:dz:zmax;
coord_grid = set_coordinate_grid(delta, xmin, xmax, ymin, ymax, zmin, zmax);
end
function [p_cw] = calc_pw(transducer_array, freq)
% Helper function to compute pressure wave and plot on a subplot.
define_media();
[x, y, z, coord_grid] = get_x_y_z_coord_grid();
% Run the simulation to calculate the pressure field.
ndiv=3;
tic();
disp('Calculating pressure field...');
p_cw=cw_pressure(transducer_array, coord_grid, water, ndiv, freq);
disp(['Simulation complete in ', num2str(toc()), ' seconds.'])
end
function plot_pw(p_cw)
[x, y, z, coord_grid] = get_x_y_z_coord_grid();
h = pcolor(x*100,z*100,rot90(squeeze(abs(p_cw)),3));
set(h,'edgecolor','none');
xlabel('x (cm)');
ylabel('z (cm)');
end
|
github
|
g4idrijs/ultrasoundsim-master
|
titrate_size_spacing_combo_and_focus.m
|
.m
|
ultrasoundsim-master/demos/titrate_size_spacing_combo_and_focus.m
| 3,710 |
utf_8
|
e87dc4285343cb1fca055bd56b79f452
|
% Script to run through different spacings and focus.
function titrate_spacing_and_focus()
% Structured as a function so that we can write helper functions in the
% same file.
% Constant of 1 element in y-direction.
elements_y = 1;
% Curvature to match human skull.
r_curv = 6e-2;
% Define media variables.
define_media();
% Set stimulation frequency.
f0 = 4e6;
num_elements = 100;
% Set the focus target.
focus_x = 0;
focus_y = 0;
focus_z = 1e-2; % 2e-2; % 1cm
% Try all combinations of spacing and focus.
spacing_list = [
5e-5
1e-4
5e-4
1e-3
5e-3
];
focus_z_list = [
1e-2
2e-2
3e-2
4e-2
5e-2
];
% Dimensions of subplots, i.e. how many plots to show.
subplot_dims = [length(spacing_list) length(focus_z_list)];
figure();
% Main title.
text(0.5, 1,'\bf Spacing vs Focus Titration','HorizontalAlignment' , ...
'center', 'VerticalAlignment', 'top');
for spacing_idx = 1:length(spacing_list)
for focus_z_idx = 1:length(focus_z_list)
spacing = spacing_list(spacing_idx);
focus_z = focus_z_list(focus_z_idx);
subplot_idx = (spacing_idx - 1) * length(focus_z_list) + ...
focus_z_idx;
% Decrement num_elements until fits within curvature.
% Based on error-catching code in create_rect_csa.m.
c_length = 2*pi*r_curv;
while (num_elements * 2 * spacing) > (c_length/2)
num_elements = num_elements - 1;
end
transducer_array = create_rect_csa(num_elements, elements_y, ...
spacing, spacing, spacing, spacing, r_curv);
% Uncomment to draw array diagrams only.
% subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
% draw_array(transducer_array);
% title(sprintf('spacing: %g, elements: %g', spacing, num_elements));
% continue;
% Calculate single-focus phase.
transducer_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, f0, 200);
% Compute pressure wave and plot result.
calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, ...
spacing, num_elements, focus_z);
end
end
end
function calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, ...
spacing, num_elements, focus)
% Helper function to compute pressure wave and plot on a subplot.
define_media();
% Set up the viewport and resolution.
xmin = -2e-2;
xmax = 2e-2;
ymin = 0;
ymax = 0;
zmin = -1e-2;
zmax = 6e-2;
xpoints = 500;
ypoints = 1;
zpoints = 500;
dx = (xmax-xmin)/xpoints;
dy = (ymax-ymin)/ypoints;
dz = (zmax-zmin)/zpoints;
delta = [dx dy dz];
x = xmin:dx:xmax;
y = ymin:dy:ymax;
z = zmin:dz:zmax;
coord_grid = set_coordinate_grid(delta, xmin, xmax, ymin, ymax, zmin, zmax);
% Run the simulation to calculate the pressure field.
ndiv=3;
tic();
disp('Calculating pressure field...');
p_cw=cw_pressure(transducer_array, coord_grid, water, ndiv, f0);
disp(['Simulation complete in ', num2str(toc()), ' seconds.'])
% Plot the result.
subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
h = pcolor(x*100,z*100,rot90(squeeze(abs(p_cw)),3));
set(h,'edgecolor','none');
title(sprintf('focus: %g, spacing: %g, elements: %g', ...
focus, spacing, num_elements));
xlabel('x (cm)');
ylabel('z (cm)');
end
|
github
|
g4idrijs/ultrasoundsim-master
|
titrate_spacing_and_focus.m
|
.m
|
ultrasoundsim-master/demos/titrate_spacing_and_focus.m
| 3,749 |
utf_8
|
7c08022742b0c92d5a9bc139f1b16e5b
|
% Script to run through different spacings and focus.
function titrate_spacing_and_focus()
% Structured as a function so that we can write helper functions in the
% same file.
% Constant of 1 element in y-direction.
elements_y = 1;
% Curvature to match human skull.
r_curv = 6e-2;
% Define media variables.
define_media();
% Set stimulation frequency.
f0 = 4e6;
num_elements = 100;
% Set the focus target.
focus_x = 0;
focus_y = 0;
focus_z = 1e-2; % 2e-2; % 1cm
element_width = 5e-4;
% Try all combinations of spacing and focus.
spacing_list = [
5e-5
1e-4
5e-4
1e-3
5e-3
];
focus_z_list = [
1e-2
2e-2
3e-2
4e-2
5e-2
];
% Dimensions of subplots, i.e. how many plots to show.
subplot_dims = [length(spacing_list) length(focus_z_list)];
figure();
% Main title.
text(0.5, 1,'\bf Spacing vs Focus Titration','HorizontalAlignment' , ...
'center', 'VerticalAlignment', 'top');
for spacing_idx = 1:length(spacing_list)
for focus_z_idx = 1:length(focus_z_list)
spacing = spacing_list(spacing_idx);
focus_z = focus_z_list(focus_z_idx);
subplot_idx = (spacing_idx - 1) * length(focus_z_list) + ...
focus_z_idx;
% Decrement num_elements until fits within curvature.
% Based on error-catching code in create_rect_csa.m.
c_length = 2*pi*r_curv;
while (num_elements * 2 * spacing) > (c_length/2)
num_elements = num_elements - 1;
end
transducer_array = create_rect_csa(num_elements, elements_y, ...
element_width, element_width, spacing, spacing, r_curv);
% Uncomment to draw array diagrams only.
% subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
% draw_array(transducer_array);
% title(sprintf('spacing: %g, elements: %g', spacing, num_elements));
% continue;
% Calculate single-focus phase.
transducer_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, f0, 200);
% Compute pressure wave and plot result.
calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, ...
spacing, num_elements, focus_z);
end
end
end
function calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, ...
spacing, num_elements, focus)
% Helper function to compute pressure wave and plot on a subplot.
define_media();
% Set up the viewport and resolution.
xmin = -2e-2;
xmax = 2e-2;
ymin = 0;
ymax = 0;
zmin = -1e-2;
zmax = 6e-2;
xpoints = 500;
ypoints = 1;
zpoints = 500;
dx = (xmax-xmin)/xpoints;
dy = (ymax-ymin)/ypoints;
dz = (zmax-zmin)/zpoints;
delta = [dx dy dz];
x = xmin:dx:xmax;
y = ymin:dy:ymax;
z = zmin:dz:zmax;
coord_grid = set_coordinate_grid(delta, xmin, xmax, ymin, ymax, zmin, zmax);
% Run the simulation to calculate the pressure field.
ndiv=3;
tic();
disp('Calculating pressure field...');
p_cw=cw_pressure(transducer_array, coord_grid, water, ndiv, f0);
disp(['Simulation complete in ', num2str(toc()), ' seconds.'])
% Plot the result.
subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
h = pcolor(x*100,z*100,rot90(squeeze(abs(p_cw)),3));
set(h,'edgecolor','none');
title(sprintf('focus: %g, spacing: %g, elements: %g', ...
focus, spacing, num_elements));
xlabel('x (cm)');
ylabel('z (cm)');
end
|
github
|
g4idrijs/ultrasoundsim-master
|
titrate_spacing_and_num_elements.m
|
.m
|
ultrasoundsim-master/demos/titrate_spacing_and_num_elements.m
| 3,890 |
utf_8
|
cfc872229bb8c15d3bef27fb6f767c37
|
% Script to run through different element spacings to observe effect.
% Titrate through combinations of num elements and spacing.
function titrate_spacing_and_num_elements()
% Structured as a function so that we can write helper functions in the
% same file.
% Constant of 1 element in y-direction.
elements_y = 1;
% Curvature to match human skull.
r_curv = 6e-2;
% Define media variables.
define_media();
% Set stimulation frequency.
f0 = 4e6;
% Set the focus target.
focus_x = 0;
focus_y = 0;
focus_z = 1e-2; % 2e-2; % 1cm
% Try all combinations of spacing and num_elements. If num_elements too
% large, decrement until acceptible number.
spacing_list = [
1e-5
5e-5
1e-4
5e-4
1e-3
5e-3
];
num_elements_list = [
100
150
200
250
];
% Dimensions of subplots, i.e. how many plots to show.
subplot_dims = [length(spacing_list) length(num_elements_list)];
figure();
for spacing_idx = 1:length(spacing_list)
for num_elements_idx = 1:length(num_elements_list)
spacing = spacing_list(spacing_idx);
num_elements = num_elements_list(num_elements_idx);
subplot_idx = (spacing_idx - 1) * length(num_elements_list) + ...
num_elements_idx;
% Decrement num_elements until fits within curvature.
% Based on error-catching code in create_rect_csa.m.
c_length = 2*pi*r_curv;
while (num_elements * 2 * spacing) > (c_length/2)
num_elements = num_elements - 1;
end
transducer_array = create_rect_csa(num_elements, elements_y, ...
spacing, spacing, spacing, spacing, r_curv);
% Uncomment to draw array diagrams only.
% subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
% draw_array(transducer_array);
% title(sprintf('spacing: %g, elements: %g', spacing, num_elements));
% continue;
% Calculate single-focus phase.
transducer_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, f0, 200);
% Compute pressure wave and plot result.
calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, ...
spacing, num_elements);
% Give the processor a rest.
pause(30);
end
end
% Main title.
text(0.5, 1,'\bf Spacing Titration','HorizontalAlignment' ,'center', ...
'VerticalAlignment', 'top');
end
function calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, ...
spacing, num_elements)
% Helper function to compute pressure wave and plot on a subplot.
define_media();
% Set up the viewport and resolution.
xmin = -2e-2;
xmax = 2e-2;
ymin = 0;
ymax = 0;
zmin = -1e-2;
zmax = 3e-2;
xpoints = 1000;
ypoints = 1;
zpoints = 1000;
dx = (xmax-xmin)/xpoints;
dy = (ymax-ymin)/ypoints;
dz = (zmax-zmin)/zpoints;
delta = [dx dy dz];
x = xmin:dx:xmax;
y = ymin:dy:ymax;
z = zmin:dz:zmax;
coord_grid = set_coordinate_grid(delta, xmin, xmax, ymin, ymax, zmin, zmax);
% Run the simulation to calculate the pressure field.
ndiv=3;
tic();
disp('Calculating pressure field...');
p_cw=cw_pressure(transducer_array, coord_grid, water, ndiv, f0);
disp(['Simulation complete in ', num2str(toc()), ' seconds.'])
% Plot the result.
subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
h = pcolor(x*100,z*100,rot90(squeeze(abs(p_cw)),3));
set(h,'edgecolor','none');
title(sprintf('spacing: %g, elements: %g', spacing, num_elements));
xlabel('x (cm)');
ylabel('z (cm)');
end
|
github
|
g4idrijs/ultrasoundsim-master
|
titrate_frequency.m
|
.m
|
ultrasoundsim-master/demos/titrate_frequency.m
| 2,752 |
utf_8
|
7955d432fdd68a56a0a038a40a0c6693
|
% Script to run through different frequencies to observe effect.
function titrate_frequency()
% Structured as a function so that we can write helper functions in the
% same file.
% Setup the transducer array.
width = 5e-5;
height = 5e-5;
elements_x = 200;
elements_y = 1;
kerf = 5e-5;
r_curv = 6e-2;
transducer_array = create_rect_csa(...
elements_x, elements_y, width, height, kerf, kerf, r_curv);
figure();
draw_array(transducer_array);
% Set up the media. By default we'll use water.
define_media();
% Set the focus target.
focus_x = 0;
focus_y = 0;
focus_z = 1e-2; % 2e-2; % 1cm
% Titrate over these frequencies.
freq_list = [
1e4
5e4
1e5
5e5
1e6
4e6
5e6
1e7
5e7
];
% Dimensions of subplots, i.e. how many plots to show.
figure();
subplot_dims = [3 3];
for i = 1:length(freq_list)
freq = freq_list(i);
% Calculate single-focus phase.
transducer_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, freq, 200);
% Compute pressure wave and plot result.
calc_pw_and_plot(transducer_array, subplot_dims, i, freq);
end
% Main title.
ha = axes('Position',[0 0 1 1],'Xlim',[0 1],'Ylim',[0 1],'Box','off', ...
'Visible','off','Units','normalized', 'clipping' , 'off');
text(0.5, 1,'\bf Frequency Titration','HorizontalAlignment' ,'center', ...
'VerticalAlignment', 'top');
end
function calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, freq)
% Helper function to compute pressure wave and plot on a subplot.
define_media();
% Set up the viewport and resolution.
xmin = -2e-2;
xmax = 2e-2;
ymin = 0;
ymax = 0;
zmin = -1e-2;
zmax = 3e-2;
xpoints = 1000;
ypoints = 1;
zpoints = 1000;
dx = (xmax-xmin)/xpoints;
dy = (ymax-ymin)/ypoints;
dz = (zmax-zmin)/zpoints;
delta = [dx dy dz];
x = xmin:dx:xmax;
y = ymin:dy:ymax;
z = zmin:dz:zmax;
coord_grid = set_coordinate_grid(delta, xmin, xmax, ymin, ymax, zmin, zmax);
% Run the simulation to calculate the pressure field.
ndiv=3;
tic();
disp('Calculating pressure field...');
p_cw=cw_pressure(transducer_array, coord_grid, water, ndiv, freq);
disp(['Simulation complete in ', num2str(toc()), ' seconds.'])
% Plot the result.
subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
h = pcolor(x*100,z*100,rot90(squeeze(abs(p_cw)),3));
set(h,'edgecolor','none');
title(sprintf('freq = %g', freq));
xlabel('x (cm)');
ylabel('z (cm)');
end
|
github
|
g4idrijs/ultrasoundsim-master
|
titrate_focus_position.m
|
.m
|
ultrasoundsim-master/demos/titrate_focus_position.m
| 2,762 |
utf_8
|
915696ae209fb87bf5994cc3c0f4f71c
|
% Script to run through different frequencies to observe effect.
function titrate_focus_position()
% Structured as a function so that we can write helper functions in the
% same file.
% Setup the transducer array.
width = 5e-4;
height = 5e-4;
elements_x = 100;
elements_y = 1;
kerf = 1e-3;
r_curv = 6e-2;
transducer_array = create_rect_csa(...
elements_x, elements_y, width, height, kerf, kerf, r_curv);
figure();
draw_array(transducer_array);
% Set up the media. By default we'll use water.
define_media();
% Set the focus target.
focus_x = 0;
focus_y = 0;
focus_z = 1e-2; % 2e-2; % 1cm
freq = 1e6;
focus_z_list = [
5e-5
1e-2
2e-2
3e-2
4e-2
5e-2
6e-2
7e-2
8e-2
];
% Dimensions of subplots, i.e. how many plots to show.
figure();
subplot_dims = [3 3];
for i = 1:length(focus_z_list)
focus_z = focus_z_list(i);
% Calculate single-focus phase.
transducer_array = find_single_focus_phase(...
transducer_array, focus_x, focus_y, focus_z, water, freq, 200);
% Compute pressure wave and plot result.
calc_pw_and_plot(transducer_array, subplot_dims, i, focus_z);
end
% Main title.
ha = axes('Position',[0 0 1 1],'Xlim',[0 1],'Ylim',[0 1],'Box','off', ...
'Visible','off','Units','normalized', 'clipping' , 'off');
text(0.5, 1,'\bf Focus Position Titration','HorizontalAlignment' ,'center', ...
'VerticalAlignment', 'top');
end
function calc_pw_and_plot(transducer_array, subplot_dims, subplot_idx, focus)
% Helper function to compute pressure wave and plot on a subplot.
define_media();
% Set up the viewport and resolution.
xmin = -6e-2;
xmax = 6e-2;
ymin = 0;
ymax = 0;
zmin = -1e-2;
zmax = 10e-2;
xpoints = 300;
ypoints = 1;
zpoints = 300;
dx = (xmax-xmin)/xpoints;
dy = (ymax-ymin)/ypoints;
dz = (zmax-zmin)/zpoints;
delta = [dx dy dz];
x = xmin:dx:xmax;
y = ymin:dy:ymax;
z = zmin:dz:zmax;
coord_grid = set_coordinate_grid(delta, xmin, xmax, ymin, ymax, zmin, zmax);
% Run the simulation to calculate the pressure field.
ndiv=3;
tic();
disp('Calculating pressure field...');
p_cw=cw_pressure(transducer_array, coord_grid, water, ndiv, f0);
disp(['Simulation complete in ', num2str(toc()), ' seconds.'])
% Plot the result.
subplot(subplot_dims(1), subplot_dims(2), subplot_idx);
h = pcolor(x*100,z*100,rot90(squeeze(abs(p_cw)),3));
set(h,'edgecolor','none');
title(sprintf('focus = %g', focus));
xlabel('x (cm)');
ylabel('z (cm)');
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex2/ex2/submit.m
| 1,605 |
utf_8
|
9b63d386e9bd7bcca66b1a3d2fa37579
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'logistic-regression';
conf.itemName = 'Logistic Regression';
conf.partArrays = { ...
{ ...
'1', ...
{ 'sigmoid.m' }, ...
'Sigmoid Function', ...
}, ...
{ ...
'2', ...
{ 'costFunction.m' }, ...
'Logistic Regression Cost', ...
}, ...
{ ...
'3', ...
{ 'costFunction.m' }, ...
'Logistic Regression Gradient', ...
}, ...
{ ...
'4', ...
{ 'predict.m' }, ...
'Predict', ...
}, ...
{ ...
'5', ...
{ 'costFunctionReg.m' }, ...
'Regularized Logistic Regression Cost', ...
}, ...
{ ...
'6', ...
{ 'costFunctionReg.m' }, ...
'Regularized Logistic Regression Gradient', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))'];
y = sin(X(:,1) + X(:,2)) > 0;
if partId == '1'
out = sprintf('%0.5f ', sigmoid(X));
elseif partId == '2'
out = sprintf('%0.5f ', costFunction([0.25 0.5 -0.5]', X, y));
elseif partId == '3'
[cost, grad] = costFunction([0.25 0.5 -0.5]', X, y);
out = sprintf('%0.5f ', grad);
elseif partId == '4'
out = sprintf('%0.5f ', predict([0.25 0.5 -0.5]', X));
elseif partId == '5'
out = sprintf('%0.5f ', costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1));
elseif partId == '6'
[cost, grad] = costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1);
out = sprintf('%0.5f ', grad);
end
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex4/ex4/submit.m
| 1,635 |
utf_8
|
ae9c236c78f9b5b09db8fbc2052990fc
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'neural-network-learning';
conf.itemName = 'Neural Networks Learning';
conf.partArrays = { ...
{ ...
'1', ...
{ 'nnCostFunction.m' }, ...
'Feedforward and Cost Function', ...
}, ...
{ ...
'2', ...
{ 'nnCostFunction.m' }, ...
'Regularized Cost Function', ...
}, ...
{ ...
'3', ...
{ 'sigmoidGradient.m' }, ...
'Sigmoid Gradient', ...
}, ...
{ ...
'4', ...
{ 'nnCostFunction.m' }, ...
'Neural Network Gradient (Backpropagation)', ...
}, ...
{ ...
'5', ...
{ 'nnCostFunction.m' }, ...
'Regularized Gradient', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
X = reshape(3 * sin(1:1:30), 3, 10);
Xm = reshape(sin(1:32), 16, 2) / 5;
ym = 1 + mod(1:16,4)';
t1 = sin(reshape(1:2:24, 4, 3));
t2 = cos(reshape(1:2:40, 4, 5));
t = [t1(:) ; t2(:)];
if partId == '1'
[J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
out = sprintf('%0.5f ', J);
elseif partId == '2'
[J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
out = sprintf('%0.5f ', J);
elseif partId == '3'
out = sprintf('%0.5f ', sigmoidGradient(X));
elseif partId == '4'
[J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
out = sprintf('%0.5f ', J);
out = [out sprintf('%0.5f ', grad)];
elseif partId == '5'
[J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
out = sprintf('%0.5f ', J);
out = [out sprintf('%0.5f ', grad)];
end
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex6/ex6/submit.m
| 1,318 |
utf_8
|
bfa0b4ffb8a7854d8e84276e91818107
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'support-vector-machines';
conf.itemName = 'Support Vector Machines';
conf.partArrays = { ...
{ ...
'1', ...
{ 'gaussianKernel.m' }, ...
'Gaussian Kernel', ...
}, ...
{ ...
'2', ...
{ 'dataset3Params.m' }, ...
'Parameters (C, sigma) for Dataset 3', ...
}, ...
{ ...
'3', ...
{ 'processEmail.m' }, ...
'Email Preprocessing', ...
}, ...
{ ...
'4', ...
{ 'emailFeatures.m' }, ...
'Email Feature Extraction', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
x1 = sin(1:10)';
x2 = cos(1:10)';
ec = 'the quick brown fox jumped over the lazy dog';
wi = 1 + abs(round(x1 * 1863));
wi = [wi ; wi];
if partId == '1'
sim = gaussianKernel(x1, x2, 2);
out = sprintf('%0.5f ', sim);
elseif partId == '2'
load('ex6data3.mat');
[C, sigma] = dataset3Params(X, y, Xval, yval);
out = sprintf('%0.5f ', C);
out = [out sprintf('%0.5f ', sigma)];
elseif partId == '3'
word_indices = processEmail(ec);
out = sprintf('%d ', word_indices);
elseif partId == '4'
x = emailFeatures(wi);
out = sprintf('%d ', x);
end
end
|
github
|
Bladefidz/machine-learning-master
|
porterStemmer.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex6/ex6/porterStemmer.m
| 9,902 |
utf_8
|
7ed5acd925808fde342fc72bd62ebc4d
|
function stem = porterStemmer(inString)
% Applies the Porter Stemming algorithm as presented in the following
% paper:
% Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
% no. 3, pp 130-137
% Original code modeled after the C version provided at:
% http://www.tartarus.org/~martin/PorterStemmer/c.txt
% The main part of the stemming algorithm starts here. b is an array of
% characters, holding the word to be stemmed. The letters are in b[k0],
% b[k0+1] ending at b[k]. In fact k0 = 1 in this demo program (since
% matlab begins indexing by 1 instead of 0). k is readjusted downwards as
% the stemming progresses. Zero termination is not in fact used in the
% algorithm.
% To call this function, use the string to be stemmed as the input
% argument. This function returns the stemmed word as a string.
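%
% Editor-added illustration (not part of the original file); a minimal,
% hypothetical call showing the input/output convention described above:
%   stem = porterStemmer('running');   % expected to return 'run'
%   stem = porterStemmer('caresses');  % expected to return 'caress'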
% Lower-case string
inString = lower(inString);
global j;
b = inString;
k = length(b);
k0 = 1;
j = k;
% With this if statement, strings of length 1 or 2 don't go through the
% stemming process. Remove this conditional to match the published
% algorithm.
stem = b;
if k > 2
% Output displays per step are commented out.
%disp(sprintf('Word to stem: %s', b));
x = step1ab(b, k, k0);
%disp(sprintf('Steps 1A and B yield: %s', x{1}));
x = step1c(x{1}, x{2}, k0);
%disp(sprintf('Step 1C yields: %s', x{1}));
x = step2(x{1}, x{2}, k0);
%disp(sprintf('Step 2 yields: %s', x{1}));
x = step3(x{1}, x{2}, k0);
%disp(sprintf('Step 3 yields: %s', x{1}));
x = step4(x{1}, x{2}, k0);
%disp(sprintf('Step 4 yields: %s', x{1}));
x = step5(x{1}, x{2}, k0);
%disp(sprintf('Step 5 yields: %s', x{1}));
stem = x{1};
end
% cons(j) is TRUE <=> b[j] is a consonant.
function c = cons(i, b, k0)
c = true;
switch(b(i))
case {'a', 'e', 'i', 'o', 'u'}
c = false;
case 'y'
if i == k0
c = true;
else
c = ~cons(i - 1, b, k0);
end
end
% measure() counts the number of consonant sequences between k0 and j. If
% c is a consonant sequence and v a vowel sequence, and <..> indicates
% arbitrary presence,
% <c><v> gives 0
% <c>vc<v> gives 1
% <c>vcvc<v> gives 2
% <c>vcvcvc<v> gives 3
% ....
function n = measure(b, k0)
global j;
n = 0;
i = k0;
while true
if i > j
return
end
if ~cons(i, b, k0)
break;
end
i = i + 1;
end
i = i + 1;
while true
while true
if i > j
return
end
if cons(i, b, k0)
break;
end
i = i + 1;
end
i = i + 1;
n = n + 1;
while true
if i > j
return
end
if ~cons(i, b, k0)
break;
end
i = i + 1;
end
i = i + 1;
end
% vowelinstem() is TRUE <=> k0,...j contains a vowel
function vis = vowelinstem(b, k0)
global j;
for i = k0:j,
if ~cons(i, b, k0)
vis = true;
return
end
end
vis = false;
%doublec(i) is TRUE <=> i,(i-1) contain a double consonant.
function dc = doublec(i, b, k0)
if i < k0+1
dc = false;
return
end
if b(i) ~= b(i-1)
dc = false;
return
end
dc = cons(i, b, k0);
% cvc(j) is TRUE <=> j-2,j-1,j has the form consonant - vowel - consonant
% and also if the second c is not w,x or y. this is used when trying to
% restore an e at the end of a short word. e.g.
%
% cav(e), lov(e), hop(e), crim(e), but
% snow, box, tray.
function c1 = cvc(i, b, k0)
if ((i < (k0+2)) || ~cons(i, b, k0) || cons(i-1, b, k0) || ~cons(i-2, b, k0))
c1 = false;
else
if (b(i) == 'w' || b(i) == 'x' || b(i) == 'y')
c1 = false;
return
end
c1 = true;
end
% ends(s) is TRUE <=> k0,...k ends with the string s.
function s = ends(str, b, k)
global j;
if (str(length(str)) ~= b(k))
s = false;
return
end % tiny speed-up
if (length(str) > k)
s = false;
return
end
if strcmp(b(k-length(str)+1:k), str)
s = true;
j = k - length(str);
return
else
s = false;
end
% setto(s) sets (j+1),...k to the characters in the string s, readjusting
% k accordingly.
function so = setto(s, b, k)
global j;
for i = j+1:(j+length(s))
b(i) = s(i-j);
end
if k > j+length(s)
b((j+length(s)+1):k) = '';
end
k = length(b);
so = {b, k};
% rs(s) is used further down.
% [Note: possible null/value for r if rs is called]
function r = rs(str, b, k, k0)
r = {b, k};
if measure(b, k0) > 0
r = setto(str, b, k);
end
% step1ab() gets rid of plurals and -ed or -ing. e.g.
% caresses -> caress
% ponies -> poni
% ties -> ti
% caress -> caress
% cats -> cat
% feed -> feed
% agreed -> agree
% disabled -> disable
% matting -> mat
% mating -> mate
% meeting -> meet
% milling -> mill
% messing -> mess
% meetings -> meet
function s1ab = step1ab(b, k, k0)
global j;
if b(k) == 's'
if ends('sses', b, k)
k = k-2;
elseif ends('ies', b, k)
retVal = setto('i', b, k);
b = retVal{1};
k = retVal{2};
elseif (b(k-1) ~= 's')
k = k-1;
end
end
if ends('eed', b, k)
if measure(b, k0) > 0;
k = k-1;
end
elseif (ends('ed', b, k) || ends('ing', b, k)) && vowelinstem(b, k0)
k = j;
retVal = {b, k};
if ends('at', b, k)
retVal = setto('ate', b(k0:k), k);
elseif ends('bl', b, k)
retVal = setto('ble', b(k0:k), k);
elseif ends('iz', b, k)
retVal = setto('ize', b(k0:k), k);
elseif doublec(k, b, k0)
retVal = {b, k-1};
if b(retVal{2}) == 'l' || b(retVal{2}) == 's' || ...
b(retVal{2}) == 'z'
retVal = {retVal{1}, retVal{2}+1};
end
elseif measure(b, k0) == 1 && cvc(k, b, k0)
retVal = setto('e', b(k0:k), k);
end
k = retVal{2};
b = retVal{1}(k0:k);
end
j = k;
s1ab = {b(k0:k), k};
% step1c() turns terminal y to i when there is another vowel in the stem.
function s1c = step1c(b, k, k0)
global j;
if ends('y', b, k) && vowelinstem(b, k0)
b(k) = 'i';
end
j = k;
s1c = {b, k};
% step2() maps double suffices to single ones. so -ization ( = -ize plus
% -ation) maps to -ize etc. note that the string before the suffix must give
% m() > 0.
function s2 = step2(b, k, k0)
global j;
s2 = {b, k};
switch b(k-1)
case {'a'}
if ends('ational', b, k) s2 = rs('ate', b, k, k0);
elseif ends('tional', b, k) s2 = rs('tion', b, k, k0); end;
case {'c'}
if ends('enci', b, k) s2 = rs('ence', b, k, k0);
elseif ends('anci', b, k) s2 = rs('ance', b, k, k0); end;
case {'e'}
if ends('izer', b, k) s2 = rs('ize', b, k, k0); end;
case {'l'}
if ends('bli', b, k) s2 = rs('ble', b, k, k0);
elseif ends('alli', b, k) s2 = rs('al', b, k, k0);
elseif ends('entli', b, k) s2 = rs('ent', b, k, k0);
elseif ends('eli', b, k) s2 = rs('e', b, k, k0);
elseif ends('ousli', b, k) s2 = rs('ous', b, k, k0); end;
case {'o'}
if ends('ization', b, k) s2 = rs('ize', b, k, k0);
elseif ends('ation', b, k) s2 = rs('ate', b, k, k0);
elseif ends('ator', b, k) s2 = rs('ate', b, k, k0); end;
case {'s'}
if ends('alism', b, k) s2 = rs('al', b, k, k0);
elseif ends('iveness', b, k) s2 = rs('ive', b, k, k0);
elseif ends('fulness', b, k) s2 = rs('ful', b, k, k0);
elseif ends('ousness', b, k) s2 = rs('ous', b, k, k0); end;
case {'t'}
if ends('aliti', b, k) s2 = rs('al', b, k, k0);
elseif ends('iviti', b, k) s2 = rs('ive', b, k, k0);
elseif ends('biliti', b, k) s2 = rs('ble', b, k, k0); end;
case {'g'}
if ends('logi', b, k) s2 = rs('log', b, k, k0); end;
end
j = s2{2};
% step3() deals with -ic-, -full, -ness etc. similar strategy to step2.
function s3 = step3(b, k, k0)
global j;
s3 = {b, k};
switch b(k)
case {'e'}
if ends('icate', b, k) s3 = rs('ic', b, k, k0);
elseif ends('ative', b, k) s3 = rs('', b, k, k0);
elseif ends('alize', b, k) s3 = rs('al', b, k, k0); end;
case {'i'}
if ends('iciti', b, k) s3 = rs('ic', b, k, k0); end;
case {'l'}
if ends('ical', b, k) s3 = rs('ic', b, k, k0);
elseif ends('ful', b, k) s3 = rs('', b, k, k0); end;
case {'s'}
if ends('ness', b, k) s3 = rs('', b, k, k0); end;
end
j = s3{2};
% step4() takes off -ant, -ence etc., in context <c>vcvc<v>.
function s4 = step4(b, k, k0)
global j;
switch b(k-1)
case {'a'}
if ends('al', b, k) end;
case {'c'}
if ends('ance', b, k)
elseif ends('ence', b, k) end;
case {'e'}
if ends('er', b, k) end;
case {'i'}
if ends('ic', b, k) end;
case {'l'}
if ends('able', b, k)
elseif ends('ible', b, k) end;
case {'n'}
if ends('ant', b, k)
elseif ends('ement', b, k)
elseif ends('ment', b, k)
elseif ends('ent', b, k) end;
case {'o'}
if ends('ion', b, k)
if j == 0
elseif ~(strcmp(b(j),'s') || strcmp(b(j),'t'))
j = k;
end
elseif ends('ou', b, k) end;
case {'s'}
if ends('ism', b, k) end;
case {'t'}
if ends('ate', b, k)
elseif ends('iti', b, k) end;
case {'u'}
if ends('ous', b, k) end;
case {'v'}
if ends('ive', b, k) end;
case {'z'}
if ends('ize', b, k) end;
end
if measure(b, k0) > 1
s4 = {b(k0:j), j};
else
s4 = {b(k0:k), k};
end
% step5() removes a final -e if m() > 1, and changes -ll to -l if m() > 1.
function s5 = step5(b, k, k0)
global j;
j = k;
if b(k) == 'e'
a = measure(b, k0);
if (a > 1) || ((a == 1) && ~cvc(k-1, b, k0))
k = k-1;
end
end
if (b(k) == 'l') && doublec(k, b, k0) && (measure(b, k0) > 1)
k = k-1;
end
s5 = {b(k0:k), k};
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex7/ex7/submit.m
| 1,438 |
utf_8
|
665ea5906aad3ccfd94e33a40c58e2ce
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'k-means-clustering-and-pca';
conf.itemName = 'K-Means Clustering and PCA';
conf.partArrays = { ...
{ ...
'1', ...
{ 'findClosestCentroids.m' }, ...
'Find Closest Centroids (k-Means)', ...
}, ...
{ ...
'2', ...
{ 'computeCentroids.m' }, ...
'Compute Centroid Means (k-Means)', ...
}, ...
{ ...
'3', ...
{ 'pca.m' }, ...
'PCA', ...
}, ...
{ ...
'4', ...
{ 'projectData.m' }, ...
'Project Data (PCA)', ...
}, ...
{ ...
'5', ...
{ 'recoverData.m' }, ...
'Recover Data (PCA)', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
X = reshape(sin(1:165), 15, 11);
Z = reshape(cos(1:121), 11, 11);
C = Z(1:5, :);
idx = (1 + mod(1:15, 3))';
if partId == '1'
idx = findClosestCentroids(X, C);
out = sprintf('%0.5f ', idx(:));
elseif partId == '2'
centroids = computeCentroids(X, idx, 3);
out = sprintf('%0.5f ', centroids(:));
elseif partId == '3'
[U, S] = pca(X);
out = sprintf('%0.5f ', abs([U(:); S(:)]));
elseif partId == '4'
X_proj = projectData(X, Z, 5);
out = sprintf('%0.5f ', X_proj(:));
elseif partId == '5'
X_rec = recoverData(X(:,1:5), Z, 5);
out = sprintf('%0.5f ', X_rec(:));
end
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex5/ex5/submit.m
| 1,765 |
utf_8
|
b1804fe5854d9744dca981d250eda251
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'regularized-linear-regression-and-bias-variance';
conf.itemName = 'Regularized Linear Regression and Bias/Variance';
conf.partArrays = { ...
{ ...
'1', ...
{ 'linearRegCostFunction.m' }, ...
'Regularized Linear Regression Cost Function', ...
}, ...
{ ...
'2', ...
{ 'linearRegCostFunction.m' }, ...
'Regularized Linear Regression Gradient', ...
}, ...
{ ...
'3', ...
{ 'learningCurve.m' }, ...
'Learning Curve', ...
}, ...
{ ...
'4', ...
{ 'polyFeatures.m' }, ...
'Polynomial Feature Mapping', ...
}, ...
{ ...
'5', ...
{ 'validationCurve.m' }, ...
'Validation Curve', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
X = [ones(10,1) sin(1:1.5:15)' cos(1:1.5:15)'];
y = sin(1:3:30)';
Xval = [ones(10,1) sin(0:1.5:14)' cos(0:1.5:14)'];
yval = sin(1:10)';
if partId == '1'
[J] = linearRegCostFunction(X, y, [0.1 0.2 0.3]', 0.5);
out = sprintf('%0.5f ', J);
elseif partId == '2'
[J, grad] = linearRegCostFunction(X, y, [0.1 0.2 0.3]', 0.5);
out = sprintf('%0.5f ', grad);
elseif partId == '3'
[error_train, error_val] = ...
learningCurve(X, y, Xval, yval, 1);
out = sprintf('%0.5f ', [error_train(:); error_val(:)]);
elseif partId == '4'
[X_poly] = polyFeatures(X(2,:)', 8);
out = sprintf('%0.5f ', X_poly);
elseif partId == '5'
[lambda_vec, error_train, error_val] = ...
validationCurve(X, y, Xval, yval);
out = sprintf('%0.5f ', ...
[lambda_vec(:); error_train(:); error_val(:)]);
end
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex3/ex3/submit.m
| 1,567 |
utf_8
|
1dba733a05282b2db9f2284548483b81
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'multi-class-classification-and-neural-networks';
conf.itemName = 'Multi-class Classification and Neural Networks';
conf.partArrays = { ...
{ ...
'1', ...
{ 'lrCostFunction.m' }, ...
'Regularized Logistic Regression', ...
}, ...
{ ...
'2', ...
{ 'oneVsAll.m' }, ...
'One-vs-All Classifier Training', ...
}, ...
{ ...
'3', ...
{ 'predictOneVsAll.m' }, ...
'One-vs-All Classifier Prediction', ...
}, ...
{ ...
'4', ...
{ 'predict.m' }, ...
'Neural Network Prediction Function' ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxdata)
% Random Test Cases
X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))'];
y = sin(X(:,1) + X(:,2)) > 0;
Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ...
1 1 ; 1 2 ; 2 1 ; 2 2 ; ...
-1 1 ; -1 2 ; -2 1 ; -2 2 ; ...
1 -1 ; 1 -2 ; -2 -1 ; -2 -2 ];
ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]';
t1 = sin(reshape(1:2:24, 4, 3));
t2 = cos(reshape(1:2:40, 4, 5));
if partId == '1'
[J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1);
out = sprintf('%0.5f ', J);
out = [out sprintf('%0.5f ', grad)];
elseif partId == '2'
out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1));
elseif partId == '3'
out = sprintf('%0.5f ', predictOneVsAll(t1, Xm));
elseif partId == '4'
out = sprintf('%0.5f ', predict(t1, t2, Xm));
end
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex8/ex8/submit.m
| 2,135 |
utf_8
|
eebb8c0a1db5a4df20b4c858603efad6
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'anomaly-detection-and-recommender-systems';
conf.itemName = 'Anomaly Detection and Recommender Systems';
conf.partArrays = { ...
{ ...
'1', ...
{ 'estimateGaussian.m' }, ...
'Estimate Gaussian Parameters', ...
}, ...
{ ...
'2', ...
{ 'selectThreshold.m' }, ...
'Select Threshold', ...
}, ...
{ ...
'3', ...
{ 'cofiCostFunc.m' }, ...
'Collaborative Filtering Cost', ...
}, ...
{ ...
'4', ...
{ 'cofiCostFunc.m' }, ...
'Collaborative Filtering Gradient', ...
}, ...
{ ...
'5', ...
{ 'cofiCostFunc.m' }, ...
'Regularized Cost', ...
}, ...
{ ...
'6', ...
{ 'cofiCostFunc.m' }, ...
'Regularized Gradient', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
n_u = 3; n_m = 4; n = 5;
X = reshape(sin(1:n_m*n), n_m, n);
Theta = reshape(cos(1:n_u*n), n_u, n);
Y = reshape(sin(1:2:2*n_m*n_u), n_m, n_u);
R = Y > 0.5;
pval = [abs(Y(:)) ; 0.001; 1];
Y = (Y .* double(R)); % set 'Y' values to 0 for movies not reviewed
yval = [R(:) ; 1; 0];
params = [X(:); Theta(:)];
if partId == '1'
[mu sigma2] = estimateGaussian(X);
out = sprintf('%0.5f ', [mu(:); sigma2(:)]);
elseif partId == '2'
[bestEpsilon bestF1] = selectThreshold(yval, pval);
out = sprintf('%0.5f ', [bestEpsilon(:); bestF1(:)]);
elseif partId == '3'
[J] = cofiCostFunc(params, Y, R, n_u, n_m, ...
n, 0);
out = sprintf('%0.5f ', J(:));
elseif partId == '4'
[J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ...
n, 0);
out = sprintf('%0.5f ', grad(:));
elseif partId == '5'
[J] = cofiCostFunc(params, Y, R, n_u, n_m, ...
n, 1.5);
out = sprintf('%0.5f ', J(:));
elseif partId == '6'
[J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ...
n, 1.5);
out = sprintf('%0.5f ', grad(:));
end
end
|
github
|
Bladefidz/machine-learning-master
|
submit.m
|
.m
|
machine-learning-master/coursera/machine-learning-standford-univerity/machine-learning-ex1/ex1/submit.m
| 1,876 |
utf_8
|
8d1c467b830a89c187c05b121cb8fbfd
|
function submit()
addpath('./lib');
conf.assignmentSlug = 'linear-regression';
conf.itemName = 'Linear Regression with Multiple Variables';
conf.partArrays = { ...
{ ...
'1', ...
{ 'warmUpExercise.m' }, ...
'Warm-up Exercise', ...
}, ...
{ ...
'2', ...
{ 'computeCost.m' }, ...
'Computing Cost (for One Variable)', ...
}, ...
{ ...
'3', ...
{ 'gradientDescent.m' }, ...
'Gradient Descent (for One Variable)', ...
}, ...
{ ...
'4', ...
{ 'featureNormalize.m' }, ...
'Feature Normalization', ...
}, ...
{ ...
'5', ...
{ 'computeCostMulti.m' }, ...
'Computing Cost (for Multiple Variables)', ...
}, ...
{ ...
'6', ...
{ 'gradientDescentMulti.m' }, ...
'Gradient Descent (for Multiple Variables)', ...
}, ...
{ ...
'7', ...
{ 'normalEqn.m' }, ...
'Normal Equations', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId)
% Random Test Cases
X1 = [ones(20,1) (exp(1) + exp(2) * (0.1:0.1:2))'];
Y1 = X1(:,2) + sin(X1(:,1)) + cos(X1(:,2));
X2 = [X1 X1(:,2).^0.5 X1(:,2).^0.25];
Y2 = Y1.^0.5 + Y1;
if partId == '1'
out = sprintf('%0.5f ', warmUpExercise());
elseif partId == '2'
out = sprintf('%0.5f ', computeCost(X1, Y1, [0.5 -0.5]'));
elseif partId == '3'
out = sprintf('%0.5f ', gradientDescent(X1, Y1, [0.5 -0.5]', 0.01, 10));
elseif partId == '4'
out = sprintf('%0.5f ', featureNormalize(X2(:,2:4)));
elseif partId == '5'
out = sprintf('%0.5f ', computeCostMulti(X2, Y2, [0.1 0.2 0.3 0.4]'));
elseif partId == '6'
out = sprintf('%0.5f ', gradientDescentMulti(X2, Y2, [-0.1 -0.2 -0.3 -0.4]', 0.01, 10));
elseif partId == '7'
out = sprintf('%0.5f ', normalEqn(X2, Y2));
end
end
|
github
|
fuenwang/BiomedicalSound-master
|
saveFig.m
|
.m
|
BiomedicalSound-master/hw02/submit/saveFig.m
| 225 |
utf_8
|
1e79a8c1f6d13a39941aa0d64550e925
|
%
% EE6265 Fu-En Wang 106061531 HW2 11/14/2017
%
function saveFig(fig, path)
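% Editor-added illustration (hypothetical output name, not from the original
% file): saveFig(gcf, 'figure1') writes figure1.pdf with the paper size
% matched to the on-screen figure.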
fig.PaperPositionMode = 'auto';
fig_pos = fig.PaperPosition;
fig.PaperSize = [fig_pos(3) fig_pos(4)];
print(fig, path, '-dpdf')
end
|
github
|
fuenwang/BiomedicalSound-master
|
cyst_phantom.m
|
.m
|
BiomedicalSound-master/hw02/submit/cyst_phantom.m
| 1,094 |
utf_8
|
bb73536838617945fa437e231968c9b4
|
%
% EE6265 Fu-En Wang 106061531 HW2 11/14/2017
%
function [pos, amp] = cyst_phantom (N, C)
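% Editor-added illustration (assumed values, not from the original file):
% generate 10000 scatterers with the cyst amplitude scaled by -20 dB.
%   [pos, amp] = cyst_phantom (10000, -20);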
x_size = 15/1000; % Width of phantom [mm]
y_size = 0; % Transverse width of phantom [mm]
z_size = 20/1000; % Height of phantom [mm]
z_start = 30/1000; % Start of phantom surface [mm];
% Create the general scatterers
lambda = 4.2286e-04; % in m
ggg = 2 * lambda;
grid_x = 0:(ggg):x_size;
grid_y = grid_x;
grid_z = 0:(ggg):z_size;
x = (randsample(grid_x, N, true) / x_size - 0.5)' * x_size;
y = (randsample(grid_y, N, true) - 0.5)' * y_size;
z = (randsample(grid_z, N, true)' / z_size)*z_size + z_start;
%{
rand('state',12345);
x = (rand (N,1)-0.5)*x_size;
y = (rand (N,1)-0.5)*y_size;
z = rand (N,1)*z_size + z_start;
%}
pos=[x y z];
% Generate the amplitudes with a Gaussian distribution
randn('state',45678);
amp=randn(N,1);
% Make the cyst and scale the amplitudes inside by C dB
% 5 mm diameter cyst
r=2.5/1000; % Radius of cyst [mm]
%r=2.7/1000;
zc=40/1000;
xc=0/1000; % Place of cyst [mm]
inside = find(((x-xc).^2 + (z-zc).^2) < r^2) ;
amp(inside) = amp(inside)*(10^(C/20));
|
github
|
fuenwang/BiomedicalSound-master
|
getNewArray.m
|
.m
|
BiomedicalSound-master/hw02/submit/getNewArray.m
| 306 |
utf_8
|
0ed688092474e37118bd3155e3545c62
|
%
% EE6265 Fu-En Wang 106061531 HW2 11/14/2017
%
function [new_data] = getNewArray(origin, M, N)
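% Editor-added illustration (assumed sizes, not from the original file):
% sum a 1000-sample trace into N = 100 bins of M = 10 samples each.
%   y = getNewArray (randn(1, 1000), 10, 100);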
new_data = zeros(1, N);
for i = 1:N
if i * M <= 1000
index = (i-1)*M+1 : i*M;
else
index = (i-1)*M+1 : length(origin);
end
new_data(i) = sum(origin(index));
end
end
|
github
|
fuenwang/BiomedicalSound-master
|
saveFig.m
|
.m
|
BiomedicalSound-master/hw02/code/saveFig.m
| 225 |
utf_8
|
1e79a8c1f6d13a39941aa0d64550e925
|
%
% EE6265 Fu-En Wang 106061531 HW2 11/14/2017
%
function saveFig(fig, path)
fig.PaperPositionMode = 'auto';
fig_pos = fig.PaperPosition;
fig.PaperSize = [fig_pos(3) fig_pos(4)];
print(fig, path, '-dpdf')
end
|
github
|
fuenwang/BiomedicalSound-master
|
cyst_phantom.m
|
.m
|
BiomedicalSound-master/hw02/code/cyst_phantom.m
| 1,094 |
utf_8
|
bb73536838617945fa437e231968c9b4
|
%
% EE6265 Fu-En Wang 106061531 HW2 11/14/2017
%
function [pos, amp] = cyst_phantom (N, C)
x_size = 15/1000; % Width of phantom [mm]
y_size = 0; % Transverse width of phantom [mm]
z_size = 20/1000; % Height of phantom [mm]
z_start = 30/1000; % Start of phantom surface [mm];
% Create the general scatterers
lambda = 4.2286e-04; % in m
ggg = 2 * lambda;
grid_x = 0:(ggg):x_size;
grid_y = grid_x;
grid_z = 0:(ggg):z_size;
x = (randsample(grid_x, N, true) / x_size - 0.5)' * x_size;
y = (randsample(grid_y, N, true) - 0.5)' * y_size;
z = (randsample(grid_z, N, true)' / z_size)*z_size + z_start;
%{
rand('state',12345);
x = (rand (N,1)-0.5)*x_size;
y = (rand (N,1)-0.5)*y_size;
z = rand (N,1)*z_size + z_start;
%}
pos=[x y z];
% Generate the amplitudes with a Gaussian distribution
randn('state',45678);
amp=randn(N,1);
% Make the cyst and scale the amplitudes inside by C dB
% 5 mm diameter cyst
r=2.5/1000; % Radius of cyst [mm]
%r=2.7/1000;
zc=40/1000;
xc=0/1000; % Place of cyst [mm]
inside = find(((x-xc).^2 + (z-zc).^2) < r^2) ;
amp(inside) = amp(inside)*(10^(C/20));
|
github
|
fuenwang/BiomedicalSound-master
|
getNewArray.m
|
.m
|
BiomedicalSound-master/hw02/code/getNewArray.m
| 306 |
utf_8
|
0ed688092474e37118bd3155e3545c62
|
%
% EE6265 Fu-En Wang 106061531 HW2 11/14/2017
%
function [new_data] = getNewArray(origin, M, N)
new_data = zeros(1, N);
for i = 1:N
if i * M <= 1000
index = (i-1)*M+1 : i*M;
else
index = (i-1)*M+1 : length(origin);
end
new_data(i) = sum(origin(index));
end
end
|
github
|
fuenwang/BiomedicalSound-master
|
xdc_dynamic_focus.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/xdc_dynamic_focus.m
| 1,324 |
utf_8
|
5b19e1bc74874267f2480741a74b9a62
|
% Procedure for using dynamic focusing for an aperture
%
% Calling: xdc_dynamic_focus (Th, time, dir_zx,dir_zy);
%
% Parameters: Th - Pointer to the transducer aperture.
% time - Time after which the dynamic focus is valid.
% dir_zx - Direction (angle) in radians for the dynamic
% focus. The direction is taken from the center for
% the focus of the transducer in the z-x plane.
% dir_zy - Direction (angle) in radians for the dynamic
% focus. The direction is taken from the center for
% the focus of the transducer in the z-y plane.
%
% Return: none.
%
% Version 1.02, March 19, 1998 by Joergen Arendt Jensen
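%
% Editor-added example sketch (not from the original file; the aperture
% creation call, element counts and steering angle are assumed):
%   Th = xdc_linear_array (64, 0.25/1000, 5/1000, 0.05/1000, 1, 1, [0 0 40]/1000);
%   xdc_dynamic_focus (Th, 0, 10*pi/180, 0);  % steer the dynamic focus 10 deg in the z-x plane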
function res = xdc_dynamic_focus (Th,time,dir_zx,dir_zy)
% Check the times vector
[m1,n]=size(time);
if ((n ~= 1) & (m1 ~= 1))
error ('Time must be a scalar');
end
% Check the direction
[m1,n]=size(dir_zx);
if ((n ~= 1) & (m1 ~= 1))
error ('Direction must be a scalar');
end
% Check the direction
[m1,n]=size(dir_zy);
if ((n ~= 1) & (m1 ~= 1))
error ('Direction must be a scalar');
end
% Call the C-part of the program to insert focus
Mat_field (1062,Th,time,dir_zx,dir_zy);
|
github
|
fuenwang/BiomedicalSound-master
|
xdc_focus.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/xdc_focus.m
| 974 |
utf_8
|
36043bd3d056fa7dd1245db340ed62d8
|
% Procedure for creating a focus time line for an aperture
%
% Calling: xdc_focus (Th, times, points);
%
% Parameters: Th - Pointer to the transducer aperture.
% times - Time after which the associated focus is valid.
% points - Focus points. Vector with three columns (x,y,z)
% and one row for each field point.
%
% Return: none.
%
% Version 1.0, November 28, 1995 by Joergen Arendt Jensen
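%
% Editor-added example sketch (aperture creation call and numbers assumed):
%   Th = xdc_linear_array (64, 0.25/1000, 5/1000, 0.05/1000, 1, 1, [0 0 40]/1000);
%   xdc_focus (Th, 0, [0 0 40]/1000);  % one focal zone at 40 mm depth, valid from t = 0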
function res = xdc_focus (Th,times,points)
% Check the times vector
[m1,n]=size(times);
if (n ~= 1)
error ('Times vector must have one column');
end
% Check the point array
[m2,n]=size(points);
if (n ~= 3)
error ('Points array must have three columns');
end
% Check both arrays
if (m1 ~= m2)
error ('There must be the same number of rows for times and focal points');
end
% Call the C-part of the program to insert focus
Mat_field (1060,Th,times,points);
|
github
|
fuenwang/BiomedicalSound-master
|
xdc_triangles.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/xdc_triangles.m
| 1,603 |
utf_8
|
540861e828a1f99427c7a46c07cbcb70
|
% Procedure for creating an aperture with a number
% of physical elements consisting of triangles
%
% Calling: Th = xdc_triangles (data, center, focus);
%
% data - Information about the triangles. One row
% for each triangle. The contents is:
%
% Index Variable Value
% -----------------------------------------------------------------------
% 1 no The number for the physical aperture starting from one
% 2-4 x1,y1,z1 First corner coordinate
% 5-7 x2,y2,z2 Second corner coordinate
% 8-10 x3,y3,z3 Third corner coordinate
% 11 apo Apodization value for this element.
%
% The physical triangle number given must be in increasing order.
%
% center - The center of the physical elements. One line for
% each element starting from 1.
%
% focus - The fixed focus for this aperture.
%
% All dimensions are in meters.
%
% Return: A handle Th as a pointer to this transducer aperture.
%
% Version 1.0, January 20, 1999 by Joergen Arendt Jensen
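%
% Editor-added example sketch (a single hypothetical triangle element):
%   data   = [1  0 0 0  1e-3 0 0  0 1e-3 0  1];   % element 1, three corners, apodization 1
%   center = [1 1 0]*1e-3/3;                      % centroid of the triangle
%   Th = xdc_triangles (data, center, [0 0 40]/1000);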
function Th = xdc_triangles (data, center, focus)
% Check that all parameters are valid
[n,m] = size(data);
if (m~=11)
error ('Field error: Not sufficient coordinates for triangles')
end
[n,m] = size(center);
if (m~=3)
error ('Field error: Not correct size for center points')
end
[n,m] = size(focus);
if (n~=1) | (m~=3)
error ('Field error: Not correct size for focus point')
end
% Call the C-part of the program to create aperture
Th = Mat_field (1023, data, center, focus);
|
github
|
fuenwang/BiomedicalSound-master
|
field_logo.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/field_logo.m
| 393 |
utf_8
|
74305dd23287025a2e56f3921eb0621a
|
% Function to display the logo for field
%
% Version 1.3, August 10, 2007 by Joergen Arendt Jensen
% Error in loading file fixed
function res = field_logo
% Create a window and display the Field II logo
h=figure;
axes('position',[0 0 1 1]);
place=which ('logo_field.mat');
eval(['load ',place])
image(data1);
axis off
colormap(map);
drawnow;
pause(5)
close(h);
|
github
|
fuenwang/BiomedicalSound-master
|
xdc_linear_multirow.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/xdc_linear_multirow.m
| 2,353 |
utf_8
|
18208adff504f9015f3174ab59d46a54
|
% Procedure for creating a linear array transducer
% with an number of rows (1.5D array)
%
% Calling: Th = xdc_linear_multirow (no_elem_x, width, no_ele_y, heights, kerf_x, kerf_y,
% no_sub_x, no_sub_y, focus);
%
% Parameters: no_elem_x - Number of physical elements in x-direction.
% width - Width in x-direction of elements.
% no_elem_y - Number of physical elements in y-direction.
% heights - Heights of the element rows in the y-direction.
% Vector with no_elem_y values.
% kerf_x - Width in x-direction between elements.
% kerf_y - Gap in y-direction between elements.
% no_sub_x - Number of sub-divisions in x-direction of physical elements.
% no_sub_y - Number of sub-divisions in y-direction of physical elements.
% focus[] - Fixed focus for array (x,y,z). Vector with three elements.
%
% Return: A handle Th as a pointer to this transducer aperture.
%
% Version 1.0, June 19, 1998 by Joergen Arendt Jensen
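%
% Editor-added example sketch (element counts and dimensions are assumed):
%   heights = [4 6 4]/1000;   % three rows in the y-direction
%   Th = xdc_linear_multirow (64, 0.25/1000, 3, heights, 0.03/1000, 0.05/1000, 1, 3, [0 0 40]/1000);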
function Th = xdc_linear_multirow (no_elem_x, width, no_elem_y, heights, kerf_x, kerf_y, no_sub_x, no_sub_y, focus)
% Check that all parameters are valid
if (no_elem_x<1)
error ('Field error: Illegal number of physical transducer elements in x-direction')
end
if (width<=0)
error ('Field error: Width of elements is negative or zero')
end
if (no_elem_y<1)
error ('Field error: Illegal number of physical transducer elements in y-direction')
end
for i=1:no_elem_y
if (heights(i)<=0)
error ('Field error: Height of elements is negative or zero')
end
end
if (kerf_x<0)
error ('Field error: Kerf in x-direction is negative')
end
if (kerf_y<0)
error ('Field error: Kerf in y-direction is negative')
end
if (no_sub_x<1) | (no_sub_y<1)
error ('Field error: Number of mathematical elements must be 1 or more')
end
if (min(size(focus))~=1) | (max(size(focus))~=3)
error ('Field error: Focus must be a vector with three elements')
end
% Call the C-part of the program to create aperture
Th = Mat_field (1012,no_elem_x, width, no_elem_y, heights, kerf_x, kerf_y, no_sub_x, no_sub_y, focus);
|
github
|
fuenwang/BiomedicalSound-master
|
calc_hhp.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/calc_hhp.m
| 846 |
utf_8
|
b3e9ab563d3bca28df72800ae37fff6d
|
% Procedure for calculating the pulse echo field.
%
% Calling: [hhp, start_time] = calc_hhp(Th1, Th2, points);
%
% Parameters: Th1 - Pointer to the transmit aperture.
% Th2 - Pointer to the receive aperture.
% points - Field points. Vector with three columns (x,y,z)
% and one row for each field point.
%
% Return: hhp - Received voltage trace.
% start_time - The time for the first sample in hhp.
%
% Version 1.0, November 22, 1995 by Joergen Arendt Jensen
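%
% Editor-added example sketch (apertures assumed to exist; here the same
% aperture handle is used for transmit and receive):
%   [hhp, t0] = calc_hhp (Th, Th, [0 0 40]/1000);   % pulse-echo trace from a point at 40 mm depth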
function [hhp, start_time] = calc_hhp (Th1, Th2, points)
% Check the point array
[m,n]=size(points);
if (n ~= 3)
error ('Points array must have three columns');
end
% Call the C-part of the program to show aperture
[hhp, start_time] = Mat_field (4003,Th1,Th2,points);
|
github
|
fuenwang/BiomedicalSound-master
|
field_debug.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/field_debug.m
| 417 |
utf_8
|
b8b796a2dc96f73d1e1cb36de01190f2
|
% Procedure for initializing Field II debugging. This will print
% out various information about the program's inner workings.
%
% Calling: field_debug(state)
%
% Parameters: State - 1: debugging, 0: no debugging.
%
% Return: nothing.
%
% Version 1.0, November 20, 1995 by Joergen Arendt Jensen
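%
% Editor-added example sketch:
%   field_debug (1);   % turn debug printout on
%   field_debug (0);   % turn it off again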
function res = field_debug (state)
% Call the C-part of the program to debug it
Mat_field (5010,state);
|
github
|
fuenwang/BiomedicalSound-master
|
ele_waveform.m
|
.m
|
BiomedicalSound-master/hw02/code/Field2/ele_waveform.m
| 1,143 |
utf_8
|
0573a5fbc90caa641825a0e8c53267e5
|
% Procedure for setting the waveform of individual
% physical elements of the transducer
%
% Calling: ele_waveform (Th, element_no, samples);
%
% Parameters: Th - Pointer to the transducer aperture.
% element_no - Column vector with one integer for each physical
% element to set waveform for.
% samples - Sample values for waveform. Matrix with one row for each
% physical element and a number of columns equal to the
% number of samples in the waveforms.
%
% Return: none.
%
% Version 1.0, July 1, 1998 by Joergen Arendt Jensen
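%
% Editor-added example sketch (sampling frequency, element indices and the
% 3 MHz sine burst are assumed for illustration):
%   fs = 100e6; t = 0:1/fs:1e-6;
%   ele_waveform (Th, (1:2)', repmat(sin(2*pi*3e6*t), 2, 1));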
function res = ele_waveform (Th, element_no, samples)
% Check the element number vector
[m1,n]=size(element_no);
if (n ~= 1)
error ('Element_no vector must have one column');
end
[m2,n]=size(samples);
% Check both arrays
if (m1 ~= m2)
error ('There must be the same number of rows for element_no vector and samples matrix');
end
% Call the C-part of the program to insert apodization
Mat_field (1082, Th, element_no, samples);
|