diff --git a/Analysis/Build_Connectivity_Matrix.m b/Analysis/Build_Connectivity_Matrix.m
deleted file mode 100755
index 4befae4..0000000
--- a/Analysis/Build_Connectivity_Matrix.m
+++ /dev/null
@@ -1,25 +0,0 @@
-%% This function constructs a connectivity matrix from the indices of
-% cluster assignment
-function [M] = Build_Connectivity_Matrix(IDX,tmp_ss,type,n_items)
-
-    IDX_full = zeros(n_items,1);
-
-    switch type
-        case 'items'
-            IDX_full(tmp_ss) = IDX;
-        case 'dims'
-            IDX_full = IDX;
-        case 'subjects'
-            IDX_full(tmp_ss) = IDX;
-    end
-
-    M = zeros(n_items,n_items);
-
-    for i = 1:length(IDX_full)
-        for j = 1:length(IDX_full)
-            if (IDX_full(i) == IDX_full(j)) && (IDX_full(i) > 0)
-                M(i,j) = 1;
-            end
-        end
-    end
-end
\ No newline at end of file
diff --git a/Analysis/CAPToNIFTI.m b/Analysis/CAPToNIFTI.m
deleted file mode 100755
index 51622ba..0000000
--- a/Analysis/CAPToNIFTI.m
+++ /dev/null
@@ -1,29 +0,0 @@
-%% Converts CAPs (matlab matrix) into NIFTI files
-% CAP must have size n_CAPs x n_voxels
-function [] = CAPToNIFTI(CAP,mask,brain_info,savedir,savename)
-
-    % Number of CAPs
-    n_CAPs = size(CAP,1);
-
-    % Voxel size
-    voxel_size = diag(brain_info.mat);
-    voxel_size = voxel_size(1:end-1)';
-
-    voxel_shift = brain_info.mat(:,4);
-    voxel_shift = voxel_shift(1:end-1)';
-
-    % Converts each CAP into a 3D volume
-    for i = 1:n_CAPs
-
-        tmp = CAP(i,:);
-        V = zeros(brain_info.dim);
-        V(mask) = tmp;
-
-        tmp_NIFTI = make_nii(V,voxel_size,-voxel_shift./voxel_size);
-
-        tmp_NIFTI.hdr.dime.datatype=64;
-        tmp_NIFTI.hdr.dime.bitpix=64;
-
-        save_nii(tmp_NIFTI,fullfile(savedir,[savename,'_CAP',num2str(i),'.nii']));
-    end
-end
\ No newline at end of file
diff --git a/Analysis/CAP_AddToLog.m b/Analysis/CAP_AddToLog.m
deleted file mode 100755
index c2e3e2f..0000000
--- a/Analysis/CAP_AddToLog.m
+++ /dev/null
@@ -1,19 +0,0 @@
-% Displays a message specifying what command is currently being run, and
-% saves the same command and relevant information into a text file
-function [Log] = CAP_AddToLog(Log,new_s,Params,Params_name)
-
-    n = length(Log);
-    Log{n+1}{1} = [' '];
-    %Log{n+1}{1} = [date,'/',num2str(round(hour(now))),'/',num2str(round(minute(now))),'/',num2str(round(second(now)))];
-    Log{n+1}{2} = new_s;
-
-    disp(new_s);
-
-    % If we also want to save parameters, we do so
-    if nargin > 2
-        for n2 = 1:length(Params)
-            Log{n+1}{n2+2} = [Params_name{n2},': ',num2str(Params{n2})];
-            disp(Log{n+1}{n2+2});
-        end
-    end
-end
\ No newline at end of file
diff --git a/Analysis/CAP_AssignFrames.m b/Analysis/CAP_AssignFrames.m
deleted file mode 100755
index 812f777..0000000
--- a/Analysis/CAP_AssignFrames.m
+++ /dev/null
@@ -1,14 +0,0 @@
-function [i] = CAP_AssignFrames(Cp,XON,d,T)
-
-    % Value of correlation below which 5% of all values lie (so gives an
-    % estimate of a 'bad correlation for a trace belonging to a cluster'
-    CT = prctile(d,T);
-
-    % cluster assignment in young
-    r = corr(Cp',XON);
-    [c,i] = max(r);
-
-    % If the correlation value is too low (below threshold), the index is set
-    % to a new non-existing group
-    i(c 0,2);
-    end
-
-    % Computation of state number and duration
-
-    % We now want to compute dynamical metrics for each subject
-    for j = 1:size(TPM,1)
-
-        % Marks the indices when we switch state (last element is idx_max+1)
-        TS = find(diff([-1337 TPM(j,:) -1337])~=0);
-
-        % Length of each period (in s.)
-        Length = diff(TS)*TR;
-
-        % Starting index of each period (in s., first element at 0 s.)
- StartTime = (TS(1:end-1)-1)*TR; - - % Type of each period - SeqType = TPM(j,TS(1:end-1)); - - % Number of different states that have been entered - n_Periods = length(Length); - - % We now want to actually count how many times we enter each state, - % and how long we stay in each state - n_Sequences = zeros(1,n_clusters+2); - length_Sequences = cell(1,n_clusters+2); - - % We go through all sequences - for i = 1:n_Periods - - % We increase the appropriate counter - n_Sequences(SeqType(i)+2) = n_Sequences(SeqType(i)+2)+1; - length_Sequences{SeqType(i)+2} = [length_Sequences{SeqType(i)+2},Length(i)]; - end - - for ns = 1:n_clusters+2 - % Average duration of states - avg_length_Sequences(ns) = mean(length_Sequences{ns}); - end - - Duration{j} = length_Sequences; - Number(j,:) = n_Sequences; - Avg_Duration(j,:) = avg_length_Sequences; - - % For each state transition, we increment properly the matrix of - % transitions - for k = 1:n_frames-1 - TM(TPM(j,k)+2,TPM(j,k+1)+2,j) = TM(TPM(j,k)+2,TPM(j,k+1)+2,j) + 1; - end - end - - % We normalize the matrix by all the transitions - TM = TM/(n_frames-1); - -end \ No newline at end of file diff --git a/Analysis/CAP_Compute_SeedMap.m b/Analysis/CAP_Compute_SeedMap.m deleted file mode 100755 index 0edc449..0000000 --- a/Analysis/CAP_Compute_SeedMap.m +++ /dev/null @@ -1,32 +0,0 @@ -% tc: cell array with subject time courses -% seed: vector with seed information -function [SM,ASM] = CAP_Compute_SeedMap(tc,seed,is_SS) - - % If the seed(s) entered are similar across subjects, then we just plot - % the connectivity maps for the first one - if ~is_SS - tmp = logical(seed(:,1)); - - % Subjectwise seed map, i.e. correlation between the average seed trace - % and any other voxel. Size: n_voxels x 1 - if sum(tmp) == 1 - SM = cellfun(@(x) (corr(x,x(:,tmp))),tc,'un',0); - else - SM = cellfun(@(x) (corr(x,mean(x(:,tmp),2))),tc,'un',0); - end - - % Average seed map across subjects from the population - ASM = (mean(cell2mat(SM),2))'; - - % Else, we compute FC with the proper seed every time - else - - for s = 1:size(seed,2) - seed_cell{s} = logical(seed(:,s)); - end - - SM = cellfun(@(x,y) (corr(x,mean(x(:,y),2))),tc,seed_cell,'un',0); - - ASM = (mean(cell2mat(SM),2))'; - end -end \ No newline at end of file diff --git a/Analysis/CAP_ConsensusClustering.m b/Analysis/CAP_ConsensusClustering.m deleted file mode 100755 index e017aa2..0000000 --- a/Analysis/CAP_ConsensusClustering.m +++ /dev/null @@ -1,137 +0,0 @@ -%% This function performs consensus clustering over a range of K values -% The goal is to provide a measure of how good each value of K is -% -% Inputs: -% - X is the data matrix (cell array with each cell n_DP x n_DIM) -% - K_range is the range of K values to examine -% - Subsample_type defines how subsampling is done: across items (data -% points) if 'items', and across dimensions if 'dimensions' -% - Subsample_fraction is the fraction of the original data points, or -% dimensions, to keep for a given fold -% - n_folds is the number of folds over which to run -function [Consensus_ordered] = CAP_ConsensusClustering(X,K_range,Subsample_type,Subsample_fraction,n_folds,DistType) - - % Number of subjects - n_subjects = length(X); - - n_items = 0; - - frames_index = cell(1,n_subjects); - - for s = 1:n_subjects - frames_index{s} = n_items + (1:size(X{s},2)); - n_items = n_items + size(X{s},2); - end - - % Number of dimensions - n_dims = size(X{1},1); - - Consensus = zeros(n_items,n_items,length(K_range)); - Consensus_ordered = zeros(n_items,n_items,length(K_range)); 
- - % Loop over all K values to assess - for k = 1:length(K_range) - - disp(['Running consensus clustering for K = ',num2str(K_range(k)),'...']); - - % Connectivity matrix that will contain 0s or 1s depending on whether - % elements are clustered together or not - M = zeros(n_items,n_items,n_folds); - I = zeros(n_items,n_items,n_folds); - - % Loops over all the folds to perform clustering for - for h = 1:n_folds - - switch Subsample_type - case 'items' - - % Number of items to subsample - n_items_ss = floor(Subsample_fraction*n_items); - - % Does the subsampling - [X_ss,tmp_ss] = datasample((cell2mat(X))',n_items_ss,1,'Replace',false); - - % Vector - I_vec = zeros(n_items,1); - I_vec(tmp_ss) = 1; - - % Constructs the indicator matrix - for i = 1:length(I_vec) - for j = 1:length(I_vec) - if (I_vec(i) == I_vec(j)) && (I_vec(i) > 0) - I(i,j,h) = 1; - end - end - end - - case 'dims' - - % Number of dimensions to subsample - n_dims_ss = floor(Subsample_fraction*n_dims); - - % Does the subsampling - [X_ss,tmp_ss] = datasample((cell2mat(X))',n_dims_ss,2,'Replace',false); - - % Constructs the indicator matrix - I(:,:,h) = ones(n_items,n_items); - - case 'subjects' - - % Number of subjects to use in the subsampling - n_subjects_ss = floor(Subsample_fraction*n_subjects); - - tmp = datasample(1:n_subjects,n_subjects_ss,'Replace',false); - - % n_frames x n_voxels - X_ss = (cell2mat(X(tmp)))'; - tmp_ss = cell2mat(frames_index(tmp)); - - % Vector - I_vec = zeros(n_items,1); - I_vec(tmp_ss) = 1; - - % Constructs the indicator matrix - for i = 1:length(I_vec) - for j = 1:length(I_vec) - if (I_vec(i) == I_vec(j)) && (I_vec(i) > 0) - I(i,j,h) = 1; - end - end - end - - otherwise - errordlg('PROBLEM IN TYPE OF SUBSAMPLING'); - end - - % Does the clustering (for now, only with k-means), so that IDX - % contains the indices for each datapoint - IDX = kmeans(X_ss,K_range(k),'Distance',DistType,'Replicates',1,'Start','uniform'); - - % Builds the connectivity matrix - M(:,:,h) = Build_Connectivity_Matrix(IDX,tmp_ss,Subsample_type,n_items); - - clear I_vec - clear X_ss - clear tmp_ss - clear IDX - end - - % Constructs the consensus matrix for the considered K - Consensus(:,:,k) = sum(M,3)./sum(I,3); - - tree = linkage(squeeze(1-Consensus(:,:,k)),'average'); - - % Leaf ordering to create a nicely looking matrix - leafOrder = optimalleaforder(tree,squeeze(1-Consensus(:,:,k))); - - % Ordered consensus matrix - Consensus_ordered(:,:,k) = Consensus(leafOrder,leafOrder,k); - - clear leafOrder - clear Dist_vec - clear test - clear IDX - clear M - clear I - end -end \ No newline at end of file diff --git a/Analysis/CAP_FindDataType.m b/Analysis/CAP_FindDataType.m deleted file mode 100755 index 1ba9c08..0000000 --- a/Analysis/CAP_FindDataType.m +++ /dev/null @@ -1,17 +0,0 @@ -%% Finds the type of data that has been entered, using the file type of the -% matlab variable -function DataType = CAP_FindDataType(Data) - - if iscell(Data) - DataType = 'Data'; - elseif isfloat(Data) - DataType = 'Motion'; - elseif islogical(Data) - DataType = 'Mask'; - elseif isstruct(Data) - DataType = 'Info'; - else - DataType = 'Unknown'; - end - -end \ No newline at end of file diff --git a/Analysis/CAP_IsDataOK.m b/Analysis/CAP_IsDataOK.m deleted file mode 100755 index 7a8275f..0000000 --- a/Analysis/CAP_IsDataOK.m +++ /dev/null @@ -1,40 +0,0 @@ -%% This function checks if the data that have been entered are of consistant -% dimensions -function [IsOK,Problems] = CAP_IsDataOK(TC,FD,mask,BI) - - % By default, stuff is not OK - IsOK = 0; - - % 
If the dimensions of TC are consistant... - if size(TC,1) == 1 && size(TC,2) >... - 3 && sum(cell2mat(cellfun(@(x) size(x,1) == size(TC{1},1) && ... - size(x,2) == size(TC{1},2),TC,'un',0))) == size(TC,2) - - % If the dimensions of FD fit the dimensions of TC... - if size(FD,1) == size(TC{1},1) && size(FD,2) == size(TC,2) - - % If the dimensions of the mask fit the rest... - if size(mask,2) == 1 && sum(mask,1) == size(TC{1},2) - - % If the dimensions of the brain info are consistant... - if isfield(BI,'dim') && isfield(BI,'mat') && size(BI.mat,1) == 4 && size(BI.mat,2) == 4 && size(BI.dim,1) == 1 && size(BI.dim,2) == 3 - - % Then, everything is OK ! - IsOK = 1; - Problems = 'No problem !!'; - else - Problems = 'Inconsistant brain information dimensions'; - end - else - Problems = 'Inconsistant mask dimensions compared to time courses'; - end - else - Problems = 'Inconsistant dimensions between time courses and motion file'; - end - - else - Problems = 'Inconsistant time courses dimensions'; - end - - -end \ No newline at end of file diff --git a/Analysis/CAP_V2V.m b/Analysis/CAP_V2V.m deleted file mode 100755 index 5cabfac..0000000 --- a/Analysis/CAP_V2V.m +++ /dev/null @@ -1,41 +0,0 @@ -function Out=CAP_V2V(In,In_dim,In_mat,Out_dim,Out_mat) -% map voxels in the space of input volume to voxels in the space -% of output volumes -% Out_fn: uses only header info -% -% v1.0 Jonas Richiardi -% - initial release, based on code by Dimitri Van De Ville and Jonas -% Richiardi - -% Out is filled with zeros and has the size of the output file -Out=zeros(Out_dim); - -% generate all coordinates in output space -[x1,x2,x3]=ndgrid(1:Out_dim(1),1:Out_dim(2),1:Out_dim(3)); -idx=1:numel(Out); % map all voxels - -% take every voxel in the volume spanned by the output images, -% compute its real-world position in mm, then map input image - -oobList=zeros(0,4); % list of out-of-bound input voxels - -for iter=1:length(idx), - oob=false; - % recover world-space position of this voxel in mm from affine - % transform matrix - mm=Out_mat*[x1(idx(iter)) x2(idx(iter)) x3(idx(iter)) 1]'; - % convert this position into index of the closest structural voxel - vx=round(In_mat\[mm(1) mm(2) mm(3) 1]'); - vx(vx<=0)=1; - vxOri=vx; - % remap out-of-bounds voxels to last - if vx(1)>In_dim(1), vx(1)=In_dim(1); oob=true; end - if vx(2)>In_dim(2), vx(2)=In_dim(2); oob=true; end - if vx(3)>In_dim(3), vx(3)=In_dim(3); oob=true; end - if (oob==true), oobList(end+1,:)=vxOri; end - % idx(iter): current voxel - Out(idx(iter))=In(vx(1),vx(2),vx(3)); - if any(Out(idx(iter))<0) %Out - warning('mapV2V:negativeVal',['Negative voxel values at ' num2str(iter)]); - end -end; \ No newline at end of file diff --git a/Analysis/CAP_find_activity.m b/Analysis/CAP_find_activity.m deleted file mode 100644 index 34b3e77..0000000 --- a/Analysis/CAP_find_activity.m +++ /dev/null @@ -1,153 +0,0 @@ -%% Finds the moments of (de)activation in a group of fMRI subjects -% Inputs -% tcvox: cell aray with a seed signal in each cell (time points x masked voxels) -% seed: masks for the seeds used (masked voxels x n_seed) -% T: threshold for the retention of active or inactive frames -% FDall: framewise displacement traces for all subjects (time points x n_subjects) -% Mot_thresh: threshold (in mm) to use for scrubbing -% -% Outputs -% Xonp: cell array (each cell dimension masked voxels x n_retained_frames) -% for active frames brain patterns -% p: 5xn_subject matrix with percentages of scrubbed frames, scrubbed and -% active frames, scrubbed and inactive frames, 
retained frames for active -% analysis and retained frames for inactive analysis -function [Xonp,p,Indices,idx_sep_seeds,Xonp_scrub] = CAP_find_activity(tcvox,seed,T,FDall,Mot_thresh,SelMode,SeedType,SignMat,is_SS) - - % Contains the indices (logical) of the selected frames for each seed - % subcase - idx_sep_seeds = nan(size(FDall,1),size(FDall,2),size(seed,2)); - - % Computes the indices of the peak values that must be excluded - flag = FDall>Mot_thresh; - - % We want to store the indices of scrubbed frames and return it later - Indices.scrubbed = logical(flag); - - - % Same for the retained frames - switch SeedType - case 'Intersection' - Indices.kept.active = logical(zeros(size(FDall,1),size(FDall,2))); - Indices.scrubbedandactive = logical(zeros(size(FDall,1),size(FDall,2))); - otherwise - Indices.kept.active = logical(ones(size(FDall,1),size(FDall,2))); - Indices.scrubbedandactive = logical(ones(size(FDall,1),size(FDall,2))); - end - - % Each cell of flag will contain a time x 1 vector of logicals (1 if - % the frame is to be censored, 0 otherwise) - flag = num2cell(flag,1); - - % 1 x n_subject vector containing the percentage of scrubbed frames (throughout the whole scan) - p_scrubbed = cell2mat(cellfun(@(x) sum(x)/length(x)*100, flag,'un',0)); - - % If we wanted an average seed, we can either be in the - % subject-specific seed case (we know there is only one seed type), or - % in the case with several different seeds (up to three) to average - % together - if strcmp(SeedType,'Average') - - % According to the two options, we compute the seed time courses - % for each subject. - if ~is_SS - S = CAP_get_seed_traces(tcvox,logical(sum(seed,2)),SignMat(1,:),is_SS); - else - S = CAP_get_seed_traces(tcvox,seed,SignMat(1,:),is_SS); - end - - % Selects the indices appropriately (threshold or percentage - % selection cases) - if strcmp(SelMode,'Threshold') - - xindp = cellfun(@(x) x>T, S, 'un', 0); - - elseif strcmp(SelMode,'Percentage') - - T_per_act = cellfun(@(x) prctile(x,100-T), S, 'un', 0); - xindp = cellfun(@(x,y) x>y, S,T_per_act, 'un', 0); - - else - errordlg('Should never happen...'); - end - - % flag now contains the traces with high activity AND high motion - flag_active = cellfun(@(x,y) x & y,xindp, flag,'un',0); - - % Vector (1xn_subj) with the percentage of traces removed because of too high - % motion and being selected as active - p_scrubactive = cell2mat(cellfun(@(x) sum(x)/length(x)*100, flag_active,'un',0)); - - % My indices of active/inactive frames now contain only the non - % corrupted frames - xindp = cellfun(@(x,y) x & ~y, xindp,flag_active,'un',0); - - Indices.kept.active = cell2mat(xindp); - Indices.scrubbedandactive = cell2mat(flag_active); - idx_sep_seeds(:,:,1) = cell2mat(xindp); - - % If Union or Intersection was wanted instead, then computations for - % different seeds need to be carried independently - else - for idx = 1:size(seed,2) - - S = CAP_get_seed_traces(tcvox,logical(seed(:,idx)),SignMat(idx,:),is_SS); - - if strcmp(SelMode,'Threshold') - - % Computes the indexes at which we have a seed activity of interest - xindp = cellfun(@(x) x>T, S, 'un', 0); - - elseif strcmp(SelMode,'Percentage') - - % Computes the threshold that corresponds to P percent frames for - % each subject - T_per_act = cellfun(@(x) prctile(x,100-T), S, 'un', 0); - - % And then uses this to select frames - xindp = cellfun(@(x,y) x>y, S,T_per_act, 'un', 0); - - else - errordlg('Should never happen...'); - end - - % flag now contains the traces with high activity AND high motion - 
flag_active = cellfun(@(x,y) x & y,xindp, flag,'un',0); - - % Vector (1xn_subj) with the percentage of traces removed because of too high - % motion and being selected as active - p_scrubactive = cell2mat(cellfun(@(x) sum(x)/length(x)*100, flag_active,'un',0)); - - % My indices of active/inactive frames now contain only the non - % corrupted frames - xindp = cellfun(@(x,y) x & ~y, xindp,flag_active,'un',0); - - % Updates the indices of the frames to keep - switch SeedType - case 'Union' - - Indices.kept.active = (Indices.kept.active) & cell2mat(xindp); - Indices.scrubbedandactive = (Indices.scrubbedandactive) & cell2mat(flag_active); - - case 'Intersection' - Indices.scrubbedandactive = (Indices.scrubbedandactive) | cell2mat(flag_active); - Indices.kept.active = (Indices.kept.active) | cell2mat(xindp); - end - - idx_sep_seeds(:,:,idx) = cell2mat(xindp); - - end - end - - % Each cell contains the frames selected as active or as inactive (if - % inactive, the sign is reversed, i.e. inactivation is a positive - % signal). Size: masked voxels x n_retained_frames - Xonp = cellfun(@(x,y) x(y,:)',tcvox,num2cell(Indices.kept.active,1),'un',0); - Xonp_scrub = cellfun(@(x,y) x(y,:)',tcvox,num2cell(logical(Indices.scrubbedandactive),1),'un',0); - - % Percentage of active and inactive frames retained per subject - p_active = cell2mat(cellfun(@(x) size(x,2)/size(FDall,1)*100, Xonp,'un',0)); - - % Matrix containing all the interesting probabilities - p = [p_scrubbed; p_scrubactive; p_active]; -end \ No newline at end of file diff --git a/Analysis/CAP_find_activity_HCP.m b/Analysis/CAP_find_activity_HCP.m deleted file mode 100644 index 05be317..0000000 --- a/Analysis/CAP_find_activity_HCP.m +++ /dev/null @@ -1,167 +0,0 @@ -%% Finds the moments of (de)activation in a group of fMRI subjects -% Inputs -% tcvox: cell aray with a seed signal in each cell (time points x masked voxels) -% seed: masks for the seeds used (masked voxels x n_seed) -% T: threshold for the retention of active or inactive frames -% FDall: framewise displacement traces for all subjects (time points x n_subjects) -% Mot_thresh: threshold (in mm) to use for scrubbing -% -% Outputs -% Xonp: cell array (each cell dimension masked voxels x n_retained_frames) -% for active frames brain patterns -% p: 5xn_subject matrix with percentages of scrubbed frames, scrubbed and -% active frames, scrubbed and inactive frames, retained frames for active -% analysis and retained frames for inactive analysis -function [Xonp,p,Indices,idx_sep_seeds,flag_outofcuriosity,Xonp_full] = CAP_find_activity_HCP(tcvox,seed,T,FDall,Mot_thresh,SelMode,SeedType,SignMat,is_SS) - - % Contains the indices (logical) of the selected frames for each seed - % subcase - idx_sep_seeds = nan(size(FDall,1),size(FDall,2),size(seed,2)); - - % Computes the indices of the peak values that must be excluded. 
If we - % go above threshold for motion, we censor the current frame, but also - % the one just before and the one just after - flag = FDall>Mot_thresh; - flag2 = circshift(flag,[1 0]); - flag3 = circshift(flag,[-1 0]); - flag4 = circshift(flag,[-2 0]); - flag5 = circshift(flag,[-3 0]); - flag6 = circshift(flag,[-4 0]); - flag7 = circshift(flag,[2 0]); - flag8 = circshift(flag,[3 0]); - flag9 = circshift(flag,[4 0]); - flag10 = circshift(flag,[5 0]); - flag11 = circshift(flag,[6 0]); - flag12 = circshift(flag,[7 0]); - flag = flag+flag2+flag3+flag4+flag5+flag6+flag7+flag8+flag9+flag10+flag11+flag12; - flag_outofcuriosity = flag; - flag(flag>1)=1; - - % We want to store the indices of scrubbed frames and return it later - Indices.scrubbed = logical(flag); - - % Same for the retained frames - switch SeedType - case 'Intersection' - Indices.kept.active = logical(zeros(size(FDall,1),size(FDall,2))); - otherwise - Indices.kept.active = logical(ones(size(FDall,1),size(FDall,2))); - end - - - % Each cell of flag will contain a time x 1 vector of logicals (1 if - % the frame is to be censored, 0 otherwise) - flag = num2cell(flag,1); - clear flag2 - clear flag3 - - % 1 x n_subject vector containing the percentage of scrubbed frames (throughout the whole scan) - p_scrubbed = cell2mat(cellfun(@(x) sum(x)/length(x)*100, flag,'un',0)); - - % If we wanted an average seed, we can either be in the - % subject-specific seed case (we know there is only one seed type), or - % in the case with several different seeds (up to three) to average - % together - if strcmp(SeedType,'Average') - - % According to the two options, we compute the seed time courses - % for each subject. - if ~is_SS - S = CAP_get_seed_traces(tcvox,logical(sum(seed,2)),SignMat(1,:),is_SS); - else - S = CAP_get_seed_traces(tcvox,seed,SignMat(1,:),is_SS); - end - - % Selects the indices appropriately (threshold or percentage - % selection cases) - if strcmp(SelMode,'Threshold') - - xindp = cellfun(@(x) x>T, S, 'un', 0); - - elseif strcmp(SelMode,'Percentage') - - T_per_act = cellfun(@(x) prctile(x,100-T), S, 'un', 0); - xindp = cellfun(@(x,y) x>y, S,T_per_act, 'un', 0); - - else - errordlg('Should never happen...'); - end - - % flag now contains the traces with high activity AND high motion - flag_active = cellfun(@(x,y) x & y,xindp, flag,'un',0); - - % Vector (1xn_subj) with the percentage of traces removed because of too high - % motion and being selected as active - p_scrubactive = cell2mat(cellfun(@(x) sum(x)/length(x)*100, flag_active,'un',0)); - - % My indices of active/inactive frames now contain only the non - % corrupted frames - xindp = cellfun(@(x,y) x & ~y, xindp,flag_active,'un',0); - - Indices.kept.active = cell2mat(xindp); - idx_sep_seeds(:,:,1) = cell2mat(xindp); - - % If Union or Intersection was wanted instead, then computations for - % different seeds need to be carried independently - else - for idx = 1:size(seed,2) - - S = CAP_get_seed_traces(tcvox,logical(seed(:,idx)),SignMat(idx,:),is_SS); - - if strcmp(SelMode,'Threshold') - - % Computes the indexes at which we have a seed activity of interest - xindp = cellfun(@(x) x>T, S, 'un', 0); - - elseif strcmp(SelMode,'Percentage') - - % Computes the threshold that corresponds to P percent frames for - % each subject - T_per_act = cellfun(@(x) prctile(x,100-T), S, 'un', 0); - - % And then uses this to select frames - xindp = cellfun(@(x,y) x>y, S,T_per_act, 'un', 0); - - else - errordlg('Should never happen...'); - end - - % flag now contains the traces with high activity AND 
high motion - flag_active = cellfun(@(x,y) x & y,xindp, flag,'un',0); - - % Vector (1xn_subj) with the percentage of traces removed because of too high - % motion and being selected as active - p_scrubactive = cell2mat(cellfun(@(x) sum(x)/length(x)*100, flag_active,'un',0)); - - % My indices of active/inactive frames now contain only the non - % corrupted frames - xindp = cellfun(@(x,y) x & ~y, xindp,flag_active,'un',0); - - % Updates the indices of the frames to keep - switch SeedType - case 'Union' - - Indices.kept.active = (Indices.kept.active) & cell2mat(xindp); - - case 'Intersection' - - Indices.kept.active = (Indices.kept.active) | cell2mat(xindp); - end - - idx_sep_seeds(:,:,idx) = cell2mat(xindp); - - end - end - - % Each cell contains the frames selected as active or as inactive (if - % inactive, the sign is reversed, i.e. inactivation is a positive - % signal). Size: masked voxels x n_retained_frames - Xonp = cellfun(@(x,y) x(y,:)',tcvox,num2cell(Indices.kept.active,1),'un',0); - Xonp_full = cellfun(@(x,y) x(y,:)',tcvox,num2cell(logical(Indices.kept.active+Indices.scrubbed),1),'un',0); - - % Percentage of active and inactive frames retained per subject - p_active = cell2mat(cellfun(@(x) size(x,2)/size(FDall,1)*100, Xonp,'un',0)); - - % Matrix containing all the interesting probabilities - p = [p_scrubbed; p_scrubactive; p_active]; -end \ No newline at end of file diff --git a/Analysis/CAP_get_seed_traces.m b/Analysis/CAP_get_seed_traces.m deleted file mode 100755 index 62ddd93..0000000 --- a/Analysis/CAP_get_seed_traces.m +++ /dev/null @@ -1,40 +0,0 @@ -%% From the raw voxelwise data, computes a spatially averaged seed signal -% tcvox is a cell array (each cell has time x masked voxels dimension) -% seed is the seed mask (masked voxel x 1) -% S is a cell array (each cell is a time x 1 vector) -function [S] = CAP_get_seed_traces(tcvox,seed,SignMat,is_SS) - - % In the case when we have the same seed across subjects, we use it - % throughout for the computations - if ~is_SS - if SignMat(1) - % Computation of the spatially averaged seed signal - S = cellfun(@(x) mean(x(:,seed),2), tcvox, 'un', 0); - elseif SignMat(2) - % Computation of the spatially averaged seed signal - S = cellfun(@(x) (-1)*mean(x(:,seed),2), tcvox, 'un', 0); - else - errordlg('PROBLEM WITH SIGN MAT !!!'); - end - - % In the case when we have subject-specific seeds, we will compute the - % seed signal separately for each subject - else - - seed_cell = {}; - - for s = 1:size(seed,2) - seed_cell{s} = seed(:,s); - end - - if SignMat(1) - % Computation of the spatially averaged seed signal - S = cellfun(@(x,y) mean(x(:,y),2), tcvox, seed_cell, 'un', 0); - elseif SignMat(2) - % Computation of the spatially averaged seed signal - S = cellfun(@(x,y) (-1)*mean(x(:,y),2), tcvox, seed_cell, 'un', 0); - else - errordlg('PROBLEM WITH SIGN MAT !!!'); - end - end -end \ No newline at end of file diff --git a/Analysis/CAP_mask4kmeans.m b/Analysis/CAP_mask4kmeans.m deleted file mode 100755 index 5e9b035..0000000 --- a/Analysis/CAP_mask4kmeans.m +++ /dev/null @@ -1,53 +0,0 @@ -%% This function sets to zero the value of the voxels that are judged noise -% X is the data to mask (n_vox x n_frames) -% topP and bottomP are the percentages of data retained (top and lowest -% activity ones respectively) by the masking procedure -% n_cv is the number of voxels in a group that must be reached for that -% group not to be masked -function [Y] = CAP_mask4kmeans(X,topP,bottomP,n_cv,mask,ai) - - % We want to exit with a matrix of the same size 
as X, for which each - % frame has had its noise-related elements set to zero - Y = zeros(size(X)); - - % For all frames... - for i = 1:size(X,2) - % We sort in descending and ascending order to get the indexes of - % the considered frame matching the top percentage of high or low - % activity - [~,Isorted] = sort(X(:,i),'descend'); - [~,Isortedrev] = sort(X(:,i),'ascend'); - - % Contains the indexes of all the points with activity of interest, - % both high and low - I = [Isorted(1:round(topP/100*length(Isorted)))',Isortedrev(1:round(bottomP/100*length(Isortedrev)))']; - - % If the elements of the frame belong to the indexes, X_binary has - % the corresponding entry set to 1. Else, it is set to 0 - X_binary = ismember(1:length(X(:,i)),I); - - % To perform the opening operation, we must convert X_binary to a - % 3D volume - temp = nan(size(mask)); - temp(mask) = X_binary; - temp(isnan(temp)) = 0; - temp = reshape(temp,ai.dim); - - % At this state, temp is a binary image on which the small clusters - % of neigboring voxels retained (less than n_cv neighbours) have - % been removed - temp = bwareaopen(temp,n_cv); - - % We convert temp back to a 1D vector, and conserve only the - % elements of interest (70700 voxels) with 0 and 1 respectively - % denoting 'not to consider for k-means' and 'to consider for - % k-means' - temp = temp(:); - Y(:,i) = temp(mask); - end - - % Y is a binary matrix of same size as X; multiplying element by - % element, we set to zero the elements of X that we want to neglect for - % clustering - Y = Y.*X; -end \ No newline at end of file diff --git a/Analysis/ComputeClusteringQuality.m b/Analysis/ComputeClusteringQuality.m deleted file mode 100755 index 4259d93..0000000 --- a/Analysis/ComputeClusteringQuality.m +++ /dev/null @@ -1,57 +0,0 @@ -function [CDF,Lorena] = ComputeClusteringQuality(Consensus,K_range) - - % Number of K values to check - K = size(Consensus,3); - - % Number of pairs of frames - n_items = size(Consensus,1); - - % Creates the CDF range - c = 0:0.005:1; - - % Quality criterion computations are run for each explored K value... 
- for k = 1:K - - % Sorted consensus entries - Cons_val = sort(jUpperTriMatToVec(squeeze(Consensus(:,:,k))),'ascend'); - - % Computation of CDF - for i = 1:length(c) - CDF(k,i) = sum(Cons_val <= c(i))/(n_items*(n_items-1)/2); - end - - - idx_dp = 1; - - for delta_perc = 0.5:0.5:10 - - Lorena(k,idx_dp) = prctile(CDF(k,:),100-delta_perc) - prctile(CDF(k,:),delta_perc); - idx_dp = idx_dp + 1; - end - - - - - % Computation of the AUC -% AUC(k) = 0; -% -% for i = 2:(n_items*(n_items-1)/2) -% AUC(k) = AUC(k) + (Cons_val(i)-Cons_val(i-1))* interp1q(c',CDF(k,:)',Cons_val(i)); -% end - - clear Cons_val - end - -% for k = 2:K -% -% % Computation of Delta -% -% tmp_max_AUC = max(AUC(1:k-1)); -% -% Delta(k) = (AUC(k) - tmp_max_AUC)/tmp_max_AUC; -% -% end -% -% Delta(1) = AUC(1); - -end \ No newline at end of file diff --git a/Analysis/Compute_Metrics.m b/Analysis/Compute_Metrics.m deleted file mode 100644 index 1b32d30..0000000 --- a/Analysis/Compute_Metrics.m +++ /dev/null @@ -1,140 +0,0 @@ -%% This function computes all the metrics of the CAP analysis framework -% Inputs: -% -% - idx depicts the indices of the frames that have been clustered (the -% cluster to which they belong) -% - xindp1 depicts the indices of the frames that have been selected as -% activation moments -% - xindn1 is the same for deactivation time points -% - sind depicts scrubbed frames -% - n_clusters is the number of clusters used for state disentanglement -% - TR is the TR of the experiment -% - CAPType denotes the type of clustering that has been chosen (only -% activation, only deactivation, or both) -% -% Outputs: -% -% - TPM (n_subjects x n_frames) is the state sequence matrix -% - Counts (-> raw/frac -> scrubbed/baseline/state: n_subj x n_states) -% contains the counts (raw and normalized) -% - Number (n_subj x n_seqtype) with sequence type: scrub, act/deact/-, -% baseline, then states -% - Avg_Duration (n_subj x n_seqtype) contains the average duration of a -% state for a subject -% - Duration (n_subj array of size 1 x n_sequences) contains the duration -% of the state sequences for each subject -% - TM (n_seqtype x n_seqtype) encompasses the transitions that exist -function [Metrics,Metrics_ext] = ... 
- Compute_Metrics(idx,xindp1,sind,n_clusters,TR,CAP,X_extended,T_assign,d) - - %% Declaration of parameters - - % Number of subjects - n = size(xindp1,2); - - % Number of frames (full) - n_frames = size(xindp1,1); - - % Cumulative frame counts across subjects (1xn_subjects) for frames - dd2p = cumsum([1,sum(xindp1,1)]); - - % TPM will contain the sequence of states for all subjects - TPM = zeros(n,n_frames); - - % Sequence of states if we try to also match the scrubbed frames to the - % CAPs - TPM_extended = zeros(n,n_frames); - - - % Filling of TP for each subject - for i = 1:n - - % Indices of the CAPs to which scrubbed frames would belong for the - % subject of interest (in is, a vector) - is = CAP_AssignFrames(CAP,X_extended{i},d,T_assign); - - % We set scrubbed time points at -1 if we want to discard scrubbed - % frames; else, we set them to the retrieved indices (and -1 - % denotes unassigned cases) - TPM(i,xindp1(:,i)) = idx(dd2p(i):dd2p(i+1)-1)'; - TPM_extended(i,xindp1(:,i)) = idx(dd2p(i):dd2p(i+1)-1)'; - - TPM(i,sind(:,i)) = -1; - - if ~isempty(is) - is(is > n_clusters) = -1; - TPM_extended(i,sind(:,i)) = is; - end - - end - - % Computation of the counts - [Counts,Duration,Number,Avg_Duration,TM] = CAP_ComputeMetrics(TPM,n_clusters+1,TR,n_frames); - [Counts_ext,Duration_ext,Number_ext,Avg_Duration_ext,TM_ext] = CAP_ComputeMetrics(TPM_extended,n_clusters+1,TR,n_frames); - - %% Computation of novel metrics - - % I want to consider: all my K CAPs, and the baseline state - TP = TM(3:end-1,3:end-1,:); - TP_ext = TM_ext(3:end-1,3:end-1,:); - - % How often do I reach a CAP from baseline - From_Baseline = squeeze(TM(2,3:end-1,:))'; - From_Baseline_ext = squeeze(TM_ext(2,3:end-1,:))'; - - % How often do Ireach the baseline from a CAP - To_Baseline = squeeze(TM(3:end-1,2,:))'; - To_Baseline_ext = squeeze(TM_ext(3:end-1,2,:))'; - - % How long do I stay in baseline - Baseline_resilience = squeeze(TM(2,2,:)); - Baseline_resilience_ext = squeeze(TM_ext(2,2,:)); - - % First, I can compute the probability to stay within a given state; I - % then set the probability to 0 for my next computations - for s = 1:size(TP,3) - for k = 1:n_clusters - - Resilience(s,k) = TP(k,k,s); - Resilience_ext(s,k) = TP_ext(k,k,s); - - TP(k,k,s) = 0; - TP_ext(k,k,s) = 0; - end - - Betweenness(s,:) = betweenness_wei(squeeze(TP(:,:,s))); - [kin(s,:),kout(s,:)] = degrees_dir(squeeze(TP(:,:,s))); - - Betweenness_ext(s,:) = betweenness_wei(squeeze(TP_ext(:,:,s))); - [kin_ext(s,:),kout_ext(s,:)] = degrees_dir(squeeze(TP_ext(:,:,s))); - end - - Metrics.From_Baseline = From_Baseline; - Metrics.To_Baseline = To_Baseline; - Metrics.Baseline_resilience = Baseline_resilience; - Metrics.Resilience = Resilience; - Metrics.TP = TP; - Metrics.Betweenness = Betweenness; - Metrics.kin = kin; - Metrics.kout = kout; - Metrics.TPM = TPM; - Metrics.Counts = Counts; - Metrics.Duration = Duration; - Metrics.Avg_Duration = Avg_Duration; - Metrics.Number = Number; - - Metrics_ext.From_Baseline = From_Baseline_ext; - Metrics_ext.To_Baseline = To_Baseline_ext; - Metrics_ext.Baseline_resilience = Baseline_resilience_ext; - Metrics_ext.Resilience = Resilience_ext; - Metrics_ext.TP = TP_ext; - Metrics_ext.Betweenness = Betweenness_ext; - Metrics_ext.kin = kin_ext; - Metrics_ext.kout = kout_ext; - Metrics_ext.TPM = TPM_extended; - Metrics_ext.Counts = Counts_ext; - Metrics_ext.Duration = Duration_ext; - Metrics_ext.Avg_Duration = Avg_Duration_ext; - Metrics_ext.Number = Number_ext; - -end \ No newline at end of file diff --git 
a/Analysis/Compute_Metrics_V2.m b/Analysis/Compute_Metrics_V2.m deleted file mode 100644 index beb1bbb..0000000 --- a/Analysis/Compute_Metrics_V2.m +++ /dev/null @@ -1,41 +0,0 @@ -function [TPM,TM] = Compute_Metrics_V2(idx,xindp1,sind,n_clusters,TR) - - % Number of subjects - n = size(xindp1,2); - - % Number of frames - n_frames = size(xindp1,1); - - % This will contain our transitions - TM = zeros(n_clusters+2,n_clusters+2,n); - - % Cumulative frame counts across subjects (1xn_subjects) for frames - dd2p = cumsum([1,sum(xindp1,1)]); - - % TPM will contain the sequence of states for all subjects - TPM = zeros(n,n_frames); - - % Computation of the state matrix - - % Filling of TP for each subject - for i = 1:n - % We set scrubbed time points at -1 - TPM(i,xindp1(:,i)) = idx(dd2p(i):dd2p(i+1)-1)'; - TPM(i,sind(:,i)) = -1; - end - - % We now want to compute dynamical metrics for each subject - for j = 1:n - - % For each state transition, we increment properly the matrix of - % transitions - for k = 1:n_frames-1 - TM(TPM(j,k)+2,TPM(j,k+1)+2,j) = TM(TPM(j,k)+2,TPM(j,k+1)+2,j) + 1; - end - end - - % We normalize the matrix by all the transitions - TM = TM/(n_frames-1); - - -end \ No newline at end of file diff --git a/Analysis/Compute_Metrics_simpler.m b/Analysis/Compute_Metrics_simpler.m deleted file mode 100644 index c580c63..0000000 --- a/Analysis/Compute_Metrics_simpler.m +++ /dev/null @@ -1,87 +0,0 @@ -%% This function computes all the metrics of the CAP analysis framework -% Inputs: -% -% - idx depicts the indices of the frames that have been clustered (the -% cluster to which they belong) -% - xindp1 depicts the indices of the frames that have been selected as -% activation moments -% - xindn1 is the same for deactivation time points -% - sind depicts scrubbed frames -% - n_clusters is the number of clusters used for state disentanglement -% - TR is the TR of the experiment -% - CAPType denotes the type of clustering that has been chosen (only -% activation, only deactivation, or both) -% -% Outputs: -% -% - TPM (n_subjects x n_frames) is the state sequence matrix -% - Counts (-> raw/frac -> scrubbed/baseline/state: n_subj x n_states) -% contains the counts (raw and normalized) -% - Number (n_subj x n_seqtype) with sequence type: scrub, act/deact/-, -% baseline, then states -% - Avg_Duration (n_subj x n_seqtype) contains the average duration of a -% state for a subject -% - Duration (n_subj array of size 1 x n_sequences) contains the duration -% of the state sequences for each subject -% - TM (n_seqtype x n_seqtype) encompasses the transitions that exist -function [TPM,Counts,Number,Avg_Duration,Duration,TM,From_Baseline,... - To_Baseline,Baseline_resilience,Resilience,Betweenness,kin,kout] = ... 
- Compute_Metrics_simpler(idx,xindp1,sind,n_clusters,TR) - - %% Declaration of parameters - - % Number of subjects - n = size(xindp1,2); - - % Number of frames (full) - n_frames = size(xindp1,1); - - % Cumulative frame counts across subjects (1xn_subjects) for frames - dd2p = cumsum([1,sum(xindp1,1)]); - - % TPM will contain the sequence of states for all subjects - TPM = zeros(n,n_frames); - - % Filling of TP for each subject - for i = 1:n - - % We set scrubbed time points at -1 if we want to discard scrubbed - % frames; else, we set them to the retrieved indices (and -1 - % denotes unassigned cases) - TPM(i,xindp1(:,i)) = idx(dd2p(i):dd2p(i+1)-1)'; - TPM(i,sind(:,i)) = -1; - end - - % Computation of the counts - [Counts,Duration,Number,Avg_Duration,TM] = CAP_ComputeMetrics(TPM,n_clusters+1,TR,n_frames); - - %% Computation of novel metrics - - % I want to consider: all my K CAPs, and the baseline state - TP = TM(3:end-1,3:end-1,:); - - % How often do I reach a CAP from baseline - From_Baseline = squeeze(TM(2,3:end-1,:))'; - - % How often do Ireach the baseline from a CAP - To_Baseline = squeeze(TM(3:end-1,2,:))'; - - % How long do I stay in baseline - Baseline_resilience = squeeze(TM(2,2,:)); - - % First, I can compute the probability to stay within a given state; I - % then set the probability to 0 for my next computations - for s = 1:size(TP,3) - for k = 1:n_clusters - - Resilience(s,k) = TP(k,k,s); - - TP(k,k,s) = 0; - end - - Betweenness(s,:) = betweenness_wei(squeeze(TP(:,:,s))); - [kin(s,:),kout(s,:)] = degrees_dir(squeeze(TP(:,:,s))); - - end - -end \ No newline at end of file diff --git a/Analysis/ConsensusClustering.m b/Analysis/ConsensusClustering.m deleted file mode 100755 index 67195f7..0000000 --- a/Analysis/ConsensusClustering.m +++ /dev/null @@ -1,106 +0,0 @@ -%% This function performs consensus clustering over a range of K values -% The goal is to provide a measure of how good each value of K is -% -% Inputs: -% - X is the data matrix (n_DP x n_DIM) -% - K_range is the range of K values to examine -% - Subsample_type defines how subsampling is done: across items (data -% points) if 'items', and across dimensions if 'dimensions' -% - Subsample_fraction is the fraction of the original data points, or -% dimensions, to keep for a given fold -% - n_folds is the number of folds over which to run -function [Consensus_ordered] = ConsensusClustering(X,K_range,Subsample_type,Subsample_fraction,n_folds,DistType) - - % Number of data points - n_items = size(X,1); - - % Number of dimensions - n_dims = size(X,2); - - Consensus = zeros(n_items,n_items,length(K_range)); - Consensus_ordered = zeros(n_items,n_items,length(K_range)); - - % Loop over all K values to assess - for k = 1:length(K_range) - - disp(['Running consensus clustering for K = ',num2str(K_range(k)),'...']); - - % Connectivity matrix that will contain 0s or 1s depending on whether - % elements are clustered together or not - M = zeros(n_items,n_items,n_folds); - I = zeros(n_items,n_items,n_folds); - - disp('before h loop'); - - % Loops over all the folds to perform clustering for - for h = 1:n_folds - - switch Subsample_type - case 'items' - - % Number of items to subsample - n_items_ss = floor(Subsample_fraction*n_items); - - % Does the subsampling - [X_ss,tmp_ss] = datasample(X,n_items_ss,1,'Replace',false); - - % Vector - I_vec = zeros(n_items,1); - I_vec(tmp_ss) = 1; - - % Constructs the indicator matrix - for i = 1:length(I_vec) - for j = 1:length(I_vec) - if (I_vec(i) == I_vec(j)) && (I_vec(i) > 0) - I(i,j,h) = 1; - 
end - end - end - - case 'dims' - - % Number of dimensions to subsample - n_dims_ss = floor(Subsample_fraction*n_dims); - - % Does the subsampling - [X_ss,tmp_ss] = datasample(X,n_dims_ss,2,'Replace',false); - - % Constructs the indicator matrix - I(:,:,h) = ones(n_items,n_items); - - otherwise - errordlg('PROBLEM IN TYPE OF SUBSAMPLING'); - end - - % Does the clustering (for now, only with k-means), so that IDX - % contains the indices for each datapoint - IDX = kmeans(X_ss,K_range(k),'Distance',DistType,'Replicates',1,'Start','uniform'); - - % Builds the connectivity matrix - M(:,:,h) = Build_Connectivity_Matrix(IDX,tmp_ss,Subsample_type,n_items); - - clear I_vec - clear X_ss - clear tmp_ss - clear IDX - end - - % Constructs the consensus matrix for the considered K - Consensus(:,:,k) = sum(M,3)./sum(I,3); - - tree = linkage(squeeze(1-Consensus(:,:,k)),'average'); - - % Leaf ordering to create a nicely looking matrix - leafOrder = optimalleaforder(tree,squeeze(1-Consensus(:,:,k))); - - % Ordered consensus matrix - Consensus_ordered(:,:,k) = Consensus(leafOrder,leafOrder,k); - - clear leafOrder - clear Dist_vec - clear test - clear IDX - clear M - clear I - end -end \ No newline at end of file diff --git a/Analysis/Run_Clustering.m b/Analysis/Run_Clustering.m deleted file mode 100755 index e812dff..0000000 --- a/Analysis/Run_Clustering.m +++ /dev/null @@ -1,121 +0,0 @@ -function [CP2,Disp,Std_Clusters,idx,d,sfrac] = Run_Clustering(XONn,n_clusters,mask,brain_info,maskP,maskN,n_rep,idx_sep_seeds,SeedType) - - % Number of seeds - n_seeds = size(idx_sep_seeds,3); - - % Number of subjects - n_subjects = size(idx_sep_seeds,2); - - % Number of possible seed combinations - switch n_seeds - case 1 - n_combos = 1; - combvec = [1]; - case 2 - n_combos = 3; - combvec = [1 0; 0 1; 1 1]; - case 3 - n_combos = 7; - combvec = [1 0 0; 0 1 0; 0 0 1; 1 1 0; 0 1 1; 1 0 1; 1 1 1]; - otherwise - errordlg('PROBLEM AT SEED FRACTIONS'); - end - - % Will contain the fraction of frames linked to a given seed (if using - % the 'Intersection' method) - sfrac = zeros(n_subjects,n_clusters,n_combos); - - % 'Filtering so that we only use the largest activation and deactivation - % spots for clustering - XONn_filtered = CAP_mask4kmeans(XONn,maskP,maskN,6,mask,brain_info); - - % Rows datapoints, columns variable (so here every 70700 activation is - % a datapoint) - % idx will contain 1462 elements (the index of the cluster to which the - % considered datapoint belongs - [idx,CP] = kmeans(XONn_filtered',n_clusters,'distance','correlation','replicates',n_rep,'empty','drop','maxiter',100,'Display','iter'); - - % idx2counts is of size K (number of clusters) and has the number of - % datapoints classified within a given cluster) - - % disp('idx2counts:'); - idx2counts = histc(idx, 1:max(idx)); - - % Output = Input(IX) - [~,IX] = sort(idx2counts,'descend'); - - % Size Kx70700 (location of each cluster); clusters are put with 'the - % most prominent one first' - CP = CP(IX,:); idx2 = idx; % order by occurrence - - % Changes the datapoint indexes so that they fit the new clusters order - for l=1:max(idx), idx2(idx==IX(l))=l; end - idx=idx2; - - CP2 = zeros(n_clusters,size(CP,2)); - Disp = zeros(1,n_clusters); - Std_Clusters = zeros(size(CP,2),n_clusters); - - % For each cluster index - for l=1:max(idx) - % Averages all data points belonging to one specific cluster, and - % stores the obtained pattern as a cell in CP2 - CP2(l,:) = mean(XONn(:,idx==l),2); %./ ( std(XON(:,idx==l),[],2) ) * sqrt(length(idx==l)); % Liu&Duyn - - % 
Measure of dispersion within the cluster considered - Disp(l) = mean(corr(CP2(l,:)',XONn(:,idx==l))); - - Std_Clusters(:,l) = std(XONn(:,idx==l),[],2); - end - - % d contains the correlation values of all frames to the CAPs - r = corr(CP2',XONn); - - d = zeros(1,length(idx)); - for k=1:max(idx) - d(idx==k) = r(k,idx==k); - end - - % Added part to compute the fraction of frames assigned to a given seed - % if using the intersection option (in which a data point is retained - % as long as at least one seed region becomes significantly (de)active) - if strcmp(SeedType,'Intersection') - - % idx_all will contain the clustering indices as put on a whole - % temporal scale (time x subjects) - idx_all = zeros(size(idx_sep_seeds,1),n_subjects); - - % Index to properly fill in the matrix by adding up the number of - % frames per subject every time - tmp_loc = 1; - - - for s = 1:n_subjects - tmp = sum(squeeze(idx_sep_seeds(:,s,:)),2); - tmp(tmp >= 1) = 1; - tmp = logical(tmp); - idx_all(tmp,s) = idx(tmp_loc:(tmp_loc+sum(tmp)-1)); - tmp_loc = tmp_loc + sum(tmp); - end - - - - % I will compute my fractions for each seed combination of - % interest; for example, 3 seeds would yield 7 possible - % combinations - for s = 1:n_subjects - for t = 1:size(idx_sep_seeds,1) - - tmp = squeeze(idx_sep_seeds(t,s,:)); - - % If there is at least one seed active or deactive at - % the point of interest, we update the sfrac count at the - % appropriate CAP - if sum(tmp) > 0 - sfrac(s,idx_all(t,s),find(ismember(combvec,tmp','rows'))) = ... - sfrac(s,idx_all(t,s),find(ismember(combvec,tmp','rows'))) + 1; - end - end - end - end -end \ No newline at end of file diff --git a/Analysis/circular_arrow.m b/Analysis/circular_arrow.m deleted file mode 100644 index ae40811..0000000 --- a/Analysis/circular_arrow.m +++ /dev/null @@ -1,137 +0,0 @@ -function circular_arrow(figHandle, radius, centre, arrow_angle, angle, direction, colour, head_size, head_style) -% This is a function designed to draw a circular arrow onto the current -% figure. It is required that "hold on" must be called before calling this -% function. -% -% The correct calling syntax is: -% circular_arrow(height, centre, angle, direction, colour, head_size) -% where: -% figHandle - the handle of the figure to be drawn on. -% radius - the radius of the arrow. -% centre - a vector containing the desired centre of the circular -% arrow. -% arrow_angle - the desired orientation angle of the circular arrow. -% This is measured in degrees counter-clockwise -% angle - the angle between starting and end point of the arrow in -% degrees. -% direction - variable set to determine format of arrow head. Use 1 -% to get a clockwise arrow, -1 to get a counter clockwise -% arrow, 2 to get a double headed arrow and 0 to get just -% an arc. -% colour (optional) - the desired colour of the arrow, using Matlab's -% Color Specification. -% head_size (optional) - the size of the arrow head. -% head_style (optional) - the style of the arrow head. -% For more information, see Annotation Arrow Properties. - -%Ensure proper number of arguments -if (nargin < 6)||(nargin > 9) - error(['Wrong number of parameters '... 
- 'Enter "help circular_arrow" for more information']); -end - -% arguments 7, 8 and 9 are optional, -if nargin < 9 - head_style = 'vback2'; -end -if nargin < 8 - head_size = 10; -end -if nargin < 7 - colour = 'k'; -end - -% display a warning if the headstyle has been specified, but direction has -% been set to no heads -if nargin == 9 && direction == 0 - warning(['Head style specified, but direction set to 0! '... - 'This will result in no arrow head being displayed.']); -end - - -% Check centre is vector with two points -[m,n] = size(centre); -if m*n ~= 2 - error('Centre must be a two element vector'); -end - -arrow_angle = deg2rad(arrow_angle); % Convert angle to rad -angle = deg2rad(angle); % Convert angle to rad -xc = centre(1); -yc = centre(2); - -% Creating (x, y) values that are in the positive direction along the x -% axis and the same height as the centre -x_temp = centre(1) + radius; -y_temp = centre(2); - -% Creating x & y values for the start and end points of arc -x1 = (x_temp-xc)*cos(arrow_angle+angle/2) - ... - (y_temp-yc)*sin(arrow_angle+angle/2) + xc; -x2 = (x_temp-xc)*cos(arrow_angle-angle/2) - ... - (y_temp-yc)*sin(arrow_angle-angle/2) + xc; -x0 = (x_temp-xc)*cos(arrow_angle) - ... - (y_temp-yc)*sin(arrow_angle) + xc; -y1 = (x_temp-xc)*sin(arrow_angle+angle/2) + ... - (y_temp-yc)*cos(arrow_angle+angle/2) + yc; -y2 = (x_temp-xc)*sin(arrow_angle-angle/2) + ... - (y_temp-yc)*cos(arrow_angle-angle/2) + yc; -y0 = (x_temp-xc)*sin(arrow_angle) + ... - (y_temp-yc)*cos(arrow_angle) + yc; - -% Plotting twice to get angles greater than 180 -i = 1; - -% Creating points -P1 = struct([]); -P2 = struct([]); -P1{1} = [x1;y1]; % Point 1 - 1 -P1{2} = [x2;y2]; % Point 1 - 2 -P2{1} = [x0;y0]; % Point 2 - 1 -P2{2} = [x0;y0]; % Point 2 - 1 -centre = [xc;yc]; % guarenteeing centre is the right dimension -n = 1000; % The number of points in the arc -v = struct([]); - -while i < 3 - - v1 = P1{i}-centre; - v2 = P2{i}-centre; - c = det([v1,v2]); % "cross product" of v1 and v2 - a = linspace(0,atan2(abs(c),dot(v1,v2)),n); % Angle range - v3 = [0,-c;c,0]*v1; % v3 lies in plane of v1 and v2 and is orthog. to v1 - v{i} = v1*cos(a)+((norm(v1)/norm(v3))*v3)*sin(a); % Arc, center at (0,0) - plot(v{i}(1,:)+xc,v{i}(2,:)+yc,'Color', colour) % Plot arc, centered at P0 - - i = i + 1; - -end - -position = struct([]); - -% Setting x and y for CW and CCW arrows -if direction == 1 - position{1} = [x2 y2 x2-(v{2}(1,2)+xc) y2-(v{2}(2,2)+yc)]; -elseif direction == -1 - position{1} = [x1 y1 x1-(v{1}(1,2)+xc) y1-(v{1}(2,2)+yc)]; -elseif direction == 2 - position{1} = [x2 y2 x2-(v{2}(1,2)+xc) y2-(v{2}(2,2)+yc)]; - position{2} = [x1 y1 x1-(v{1}(1,2)+xc) y1-(v{1}(2,2)+yc)]; -elseif direction == 0 - % Do nothing -else - error('direction flag not 1, -1, 2 or 0.'); -end - -% Loop for each arrow head -i = 1; -while i < abs(direction) + 1 - h=annotation('arrow'); % arrow head - set(h,'parent', gca, 'position', position{i}, ... - 'HeadLength', head_size, 'HeadWidth', head_size,... 
- 'HeadStyle', head_style, 'linestyle','none','Color', colour); - - i = i + 1; -end \ No newline at end of file diff --git a/Analysis/jUpperTriMatToVec.m b/Analysis/jUpperTriMatToVec.m deleted file mode 100755 index 693b942..0000000 --- a/Analysis/jUpperTriMatToVec.m +++ /dev/null @@ -1,32 +0,0 @@ -function v=jUpperTriMatToVec(m,varargin) -% converts the upper-triangular part of a matrix to a vector -% -% IN: -% m: matrix -% offset: offset above leading diagonal, fed to triu function -% OUT: -% v: vector of upper-triangular values -% -% v1.0 Oct 2009 Jonas Richiardi -% - initial release -% v1.1 Feb 2015 Dimitri Van De Ville -% - faster version by single index - -switch nargin - case 1 - offset=1; - case 2 - offset=varargin{1}; -end - -% get indices of upper triangular part (Peter Acklam's trick) -%[m_i m_j] = find(triu(ones(size(m)), offset)); -idx = find(triu(ones(size(m)), offset)); - -v=m(idx); - -% copy to vector -%v=zeros(numel(m_i),1); -%for v_idx=1:numel(m_i) -% v(v_idx)=m(m_i(v_idx),m_j(v_idx)); -%end \ No newline at end of file diff --git a/Analysis/munkres.m b/Analysis/munkres.m deleted file mode 100755 index 58a30c8..0000000 --- a/Analysis/munkres.m +++ /dev/null @@ -1,200 +0,0 @@ -function [assignment,cost] = munkres(costMat) -% MUNKRES Munkres (Hungarian) Algorithm for Linear Assignment Problem. -% -% [ASSIGN,COST] = munkres(COSTMAT) returns the optimal column indices, -% ASSIGN assigned to each row and the minimum COST based on the assignment -% problem represented by the COSTMAT, where the (i,j)th element represents the cost to assign the jth -% job to the ith worker. -% -% Partial assignment: This code can identify a partial assignment is a full -% assignment is not feasible. For a partial assignment, there are some -% zero elements in the returning assignment vector, which indicate -% un-assigned tasks. The cost returned only contains the cost of partially -% assigned tasks. - -% This is vectorized implementation of the algorithm. It is the fastest -% among all Matlab implementations of the algorithm. - -% Examples -% Example 1: a 5 x 5 example -%{ -[assignment,cost] = munkres(magic(5)); -disp(assignment); % 3 2 1 5 4 -disp(cost); %15 -%} -% Example 2: 400 x 400 random data -%{ -n=400; -A=rand(n); -tic -[a,b]=munkres(A); -toc % about 2 seconds -%} -% Example 3: rectangular assignment with inf costs -%{ -A=rand(10,7); -A(A>0.7)=Inf; -[a,b]=munkres(A); -%} -% Example 4: an example of partial assignment -%{ -A = [1 3 Inf; Inf Inf 5; Inf Inf 0.5]; -[a,b]=munkres(A) -%} -% a = [1 0 3] -% b = 1.5 -% Reference: -% "Munkres' Assignment Algorithm, Modified for Rectangular Matrices", -% http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html - -% version 2.3 by Yi Cao at Cranfield University on 11th September 2011 - -assignment = zeros(1,size(costMat,1)); -cost = 0; - -validMat = costMat == costMat & costMat < Inf; -bigM = 10^(ceil(log10(sum(costMat(validMat))))+1); -costMat(~validMat) = bigM; - -% costMat(costMat~=costMat)=Inf; -% validMat = costMat0) - break - end - coverColumn = false(1,n); - coverColumn(starZ(starZ>0))=true; - coverRow = false(n,1); - primeZ = zeros(n,1); - [rIdx, cIdx] = find(dMat(~coverRow,~coverColumn)==bsxfun(@plus,minR(~coverRow),minC(~coverColumn))); - while 1 - %************************************************************************** - % STEP 4: Find a noncovered zero and prime it. If there is no starred - % zero in the row containing this primed zero, Go to Step 5. - % Otherwise, cover this row and uncover the column containing - % the starred zero. 
Continue in this manner until there are no - % uncovered zeros left. Save the smallest uncovered value and - % Go to Step 6. - %************************************************************************** - cR = find(~coverRow); - cC = find(~coverColumn); - rIdx = cR(rIdx); - cIdx = cC(cIdx); - Step = 6; - while ~isempty(cIdx) - uZr = rIdx(1); - uZc = cIdx(1); - primeZ(uZr) = uZc; - stz = starZ(uZr); - if ~stz - Step = 5; - break; - end - coverRow(uZr) = true; - coverColumn(stz) = false; - z = rIdx==uZr; - rIdx(z) = []; - cIdx(z) = []; - cR = find(~coverRow); - z = dMat(~coverRow,stz) == minR(~coverRow) + minC(stz); - rIdx = [rIdx(:);cR(z)]; - cIdx = [cIdx(:);stz(ones(sum(z),1))]; - end - if Step == 6 - % ************************************************************************* - % STEP 6: Add the minimum uncovered value to every element of each covered - % row, and subtract it from every element of each uncovered column. - % Return to Step 4 without altering any stars, primes, or covered lines. - %************************************************************************** - [minval,rIdx,cIdx]=outerplus(dMat(~coverRow,~coverColumn),minR(~coverRow),minC(~coverColumn)); - minC(~coverColumn) = minC(~coverColumn) + minval; - minR(coverRow) = minR(coverRow) - minval; - else - break - end - end - %************************************************************************** - % STEP 5: - % Construct a series of alternating primed and starred zeros as - % follows: - % Let Z0 represent the uncovered primed zero found in Step 4. - % Let Z1 denote the starred zero in the column of Z0 (if any). - % Let Z2 denote the primed zero in the row of Z1 (there will always - % be one). Continue until the series terminates at a primed zero - % that has no starred zero in its column. Unstar each starred - % zero of the series, star each primed zero of the series, erase - % all primes and uncover every line in the matrix. Return to Step 3. - %************************************************************************** - rowZ1 = find(starZ==uZc); - starZ(uZr)=uZc; - while rowZ1>0 - starZ(rowZ1)=0; - uZc = primeZ(rowZ1); - uZr = rowZ1; - rowZ1 = find(starZ==uZc); - starZ(uZr)=uZc; - end -end - -% Cost of assignment -rowIdx = find(validRow); -colIdx = find(validCol); -starZ = starZ(1:nRows); -vIdx = starZ <= nCols; -assignment(rowIdx(vIdx)) = colIdx(starZ(vIdx)); -pass = assignment(assignment>0); -pass(~diag(validMat(assignment>0,pass))) = 0; -assignment(assignment>0) = pass; -cost = trace(costMat(assignment>0,assignment(assignment>0))); - -function [minval,rIdx,cIdx]=outerplus(M,x,y) -ny=size(M,2); -minval=inf; -for c=1:ny - M(:,c)=M(:,c)-(x+y(c)); - minval = min(minval,min(M(:,c))); -end -[rIdx,cIdx]=find(M==minval); diff --git a/CAP_TB.fig b/CAP_TB.fig deleted file mode 100755 index 3aa7f81..0000000 Binary files a/CAP_TB.fig and /dev/null differ diff --git a/CAP_TB.m b/CAP_TB.m deleted file mode 100755 index edd861c..0000000 --- a/CAP_TB.m +++ /dev/null @@ -1,3476 +0,0 @@ -%% This is the main script containing the routines necessary for the use -% of the co-activation pattern analysis (CAP) toolbox -% -% Implemented and written by Thomas Bolton, Medical Image Processing -% Laboratory (MIP:Lab) -% -% Version 1.0: November 9th 2018: fixing the last remaining issues -% -function varargout = CAP_TB(varargin) - -gui_Singleton = 1; -gui_State = struct('gui_Name', mfilename, ... - 'gui_Singleton', gui_Singleton, ... - 'gui_OpeningFcn', @CAP_TB_OpeningFcn, ... - 'gui_OutputFcn', @CAP_TB_OutputFcn, ... - 'gui_LayoutFcn', [] , ... 
- 'gui_Callback', []); -if nargin && ischar(varargin{1}) - gui_State.gui_Callback = str2func(varargin{1}); -end - -if nargout - [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:}); -else - gui_mainfcn(gui_State, varargin{:}); -end - - - - - -%% Executes when the window opens -function CAP_TB_OpeningFcn(hObject, eventdata, handles, varargin) - -%%%%%%%%%%%%%%%%%%%% -% Path and other miscellaneous settings - -% Adds the paths to the subfolders of the toolbox that will be important -% for the plotting and the analysis -addpath(genpath('./Plotting')); -addpath(genpath('./Analysis')); -addpath(genpath('./DefaultData')); - -% Sets warnings off -warning('off'); - -% Choose default command line output for CAP_TB -handles.output = hObject; - - -%%%%%%%%%%%%%%%%%%%% -% Data loading - -% TC will contain the time courses of the subjects from the different -% populations (cell array, each cell with size n_TP x n_masked_voxels) -handles.TC = {}; - -% FD contains the traces of framewise displacement for the subjects (n_TP x -% n_subj per cell of the array, one cell per dataset) -handles.FD = {}; - -% Information on the NIFTI files from which the data originate -handles.brain_info = {}; - -% Mask used prior to CAP analysis -handles.mask = {}; - -% Number of datasets added to the interface. A dataset is defined as a -% population of subjects from the same experimental group (e.g., an -% ensemble of subjects suffering from the same disorder) -handles.n_datasets = 0; - -% Stores the number of subjects that have been loaded -handles.n_subjects = {}; - -% SubjNames contains the names of the files from which subject data have -% been sampled (full paths) -handles.SubjNames = {}; - -% MotName contains the name(s) of the file(s) loaded as motion ones -handles.MotName = {}; - -% TP and VOX contain the number of time points (of frames) and of brain -% voxels that are present in the loaded datasets. Those values are -% initialized at -inf, and then take the values of the first file that is -% being loaded if that file looks reasonable dimensionally speaking. 
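% As an illustration of the data layout these handles expect (this sketch is
% not part of the original toolbox, and the sizes below are made up), one
% dataset of 5 subjects with 200 frames and 50000 in-mask voxels would be
% stored as:
%
%   handles.TC{1} = arrayfun(@(s) randn(200,50000), 1:5, 'UniformOutput', false);
%   handles.FD{1} = rand(200,5);
%
% that is, one cell per dataset holding a 1 x n_subjects cell array of
% (n_TP x n_voxels) time course matrices, plus one (n_TP x n_subjects)
% matrix of framewise displacement traces.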
In the -% scripts below, it is assumed that all subject populations loaded have a -% similar number of time points and of voxels -handles.SubjSize.TP = -inf; -handles.SubjSize.VOX = -inf; - -% By default, the reference population from which CAPs will be extracted -% will be the first uploaded one -% Note: for the moment, this parameter is fixed to 1 (no functionality for -% modifying it yet) -handles.ReferencePopulation = 1; - -% Loads and sets the brain underlay used for plotting purposes -Underlay = load_nii('Underlay.nii'); -Underlay_mat = [Underlay.hdr.hist.srow_x; Underlay.hdr.hist.srow_y; Underlay.hdr.hist.srow_z; 0 0 0 1]; -Underlay_dim = Underlay.hdr.dime.dim; -Underlay_dim = Underlay_dim(2:4); -handles.Underlay_info.dim = Underlay_dim; -handles.Underlay_info.mat = Underlay_mat; -clear Underlay -clear Underlay_dim -clear Underlay_mat -load('brain.mat'); -assignin('base','brain', brain); -handles.brain = brain; -clear brain - -% Handles for the TR and whether it is a reasonable value -handles.TR = -inf; -handles.isTROK = false; - - -%%%%%%%%%%%%%%%%%%%% -% Seed selection and seed maps - -% Seed used for the analysis -handles.seed = []; - -% Because there can be more than one seed, we create a vector that will -% encompass the multiple information in several colors -handles.seed_display = []; - -% Handle to verify whether the amount of seeds has been entered well, how -% many seeds there are, and the type -handles.isSeedOK = false; - -% Option to have a subject-specific seed -handles.isSeedSubjectSpecific = 0; - -% Number of different seeds -handles.n_seed = 1; - -handles.SeedType = 'Average'; - -% One average map throughout subjects -handles.AvgSeedMap = []; - -%%%%%%%%%%%%%%%%%%%% -% Time points selection - -% Motion threshold for scrubbing -handles.Tmot = 0.5; - -% Threshold for frame selection in the analysis -handles.T = 0.5; - -% Sets the right text header in front of the frame selection threshold box -% (threshold or retention percentage) -if get(handles.TRadio,'Value') - set(handles.TText,'String','T [-]'); - handles.SelMode = 'Threshold'; -else - set(handles.TText,'String','P [%]'); - handles.SelMode = 'Percentage'; -end - -% Denotes the type of frames (activation, deactivation or both) to use for -% selecting time points -handles.SignMatrix = [1 0; 1 0]; - -% Activation and deactivation frames kept for all datasets -handles.Xonp = {}; -handles.Xonn = {}; - -% Percentage of frames retained for CAP analysis (discarding both the -% baseline time points and the scrubbed time points) -handles.RetainedPercentage = {}; - -% Indices of the frames that have been retained (i.e. 
when do they occur in -% the full time course), of baseline frames, and of scrubbed frames -handles.FrameIndices = {}; - -handles.idx_sep_seeds = {}; - -%%%%%%%%%%%%%%%%%%%% -% CAP analysis - -% Max number of clusters to verify with consensus clustering -handles.Kmax = 12; - -% Percentage of items to use for the consensus clustering folds -handles.PCC = 80; - -% Number of times that clustering is run -handles.n_rep = 20; - -% Percentage voxels to keep for clustering (positive - Pp - and negative - -% Pn - ones) -handles.Pp = 100; -handles.Pn = 100; - -% Number of clusters to use in the analysis -handles.K = 5; - -% Indices of the CAP to which frames from the reference population and from -% the other populations are assigned -handles.idx = {}; - -% Value of correlation of the control group frame that is the Tper-th least -% close to its CAP -handles.CorrDist = []; - -% Contains the CAPs -handles.CAP = []; - -% Contains the standard deviation for the CAPs -handles.STDCAP = []; - -% Percentile threshold used in frame assignment -handles.percentile = 5; - -%%%%%%%%%%%%%%%%%%%% -% Metrics - -% Will contain the metrics -% State matrix (n_subjects x n_time points) -handles.TPM = {}; - -% State counts (raw and frac) -handles.Counts = {}; - -% Number of times entering a state -handles.Number = {}; - -% Average duration within a state -handles.Avg_Duration = {}; - -% Duration of all the excursions within a state -handles.Duration = {}; - -handles.From_Baseline = {}; - -handles.To_Baseline = {}; - -handles.Baseline_resilience = {}; - -handles.Resilience = {}; - -handles.Betweenness = {}; - -handles.kin = {}; - -handles.kout = {}; - -% Transition probabilities -handles.TM = {}; - -% Cumulative sum of states -handles.TPMCum = {}; - -% Seed fractions -handles.sfrac = []; - -%%%%%%%%%%%%%%%%%%%% -% General utilities - -% Log containing the different events summoned from the toolbox -handles.Log = {}; - -% Colors used in plotting of all populations -handles.PopColor{1} = [255,255,180; 219,224,252; 188,252,188; 230,230,230]/255; -handles.PopColor{2} = [130,48,48; 51,75,163; 59,113,86; 0, 0, 0]/255; - -% Project title, by default 'Untitled' -handles.project_title = 'Untitled'; - -% Directory to which data is to be saved (initially loaded as ./SavedData) -handles.savedir = fullfile(pwd,'SavedData'); -set(handles.SaveFolderText,'String',handles.savedir); - -% Update handles structure -guidata(hObject, handles); - -% --- Outputs from this function are returned to the command line. -function varargout = CAP_TB_OutputFcn(hObject, eventdata, handles) - -% Get default command line output from handles structure -varargout{1} = handles.output; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%% SECTION 1: LOADING %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%% Data Button Click - -% Executes when adding a subject population (clicking on 'A. Load data') -function DataButton_Callback(hObject, eventdata, handles) - % Opens up a menu to choose the required files for the analysis; the user - % must select four files: - % 1. Data file - % 2. Mask file - % 3. Info file (header of NIFTI) - % 4. Motion file - - % He can select them in the order he likes - [filename1,pathname1]=uigetfile({'*.*','All Files'},... 
- 'Select data, motion, mask and brain info files...','MultiSelect','on'); - - % If the user has indeed entered files - if ~isequal(filename1,0) || ~isequal(pathname1,0) - % There should be four selected files. In this switch, we test - % for the amount of entered files - if length(filename1) == 4 - - % The files are loaded sequentially - for i = 1:length(filename1) - File{i} = fullfile(pathname1, filename1{i}); - tmp = load(File{i}); - assignin('base','tmp', tmp); - tmp = struct2array(tmp); - - % Finds what type of file DataType is between the four - % possibilities - DataType = CAP_FindDataType(tmp); - - % Accordingly, fill the right handle with the information - switch DataType - case 'Data' - % We store the data into handles.TC and the file name that goes - % with it - handles.TC{handles.n_datasets+1} = tmp; - - % Takes only the last two parts of the file name and - % puts them in tmp_file - [tmp_file,n_delim] = strsplit(File{i},'/'); - - if isempty(n_delim) - tmp_file = strsplit(File{i},'\'); - end - - tmp_file = tmp_file(end-1:end); - - % This is what is saved and displayed in the main - % window then - handles.SubjNames{handles.n_datasets+1} = fullfile(tmp_file{1},tmp_file{2}); - handles.n_subjects{handles.n_datasets+1} = size(handles.TC{handles.n_datasets+1},2); - - % Some commands are run only for the first dataset that we add - if handles.n_datasets == 0 - % We compute and store the number of voxels and the number of time - % points, as well as the number of subjects - handles.SubjSize.VOX = size(handles.TC{1}{1},2); - handles.SubjSize.TP = size(handles.TC{1}{1},1); - end - - % Sets the text label about data dimensions - set(handles.Dimensionality_Text, 'String', [num2str(handles.SubjSize.TP),... - ' frames x ',num2str(handles.SubjSize.VOX),' voxels (',... - strjoin(arrayfun(@(x) num2str(x),cell2mat(handles.n_subjects),... - 'UniformOutput',false),'+'),')']); - - case 'Motion' - % We store the path of the motion file added - handles.MotName{handles.n_datasets+1} = File{i}; - % If the dimensions hold, we store the file into the FD variable - % and then plot the FD ribbon graph - handles.FD{handles.n_datasets+1} = tmp; - - case 'Mask' - handles.mask{handles.n_datasets+1} = tmp; - case 'Info' - % If so, we store the value and we validate the choice - handles.brain_info{handles.n_datasets+1} = tmp; - - % If the data file is unknown, then we return an error and - % the user must enter files again - case 'Unknown' - errordlg('At least one of the selected files is not recognized; please try again !'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - - end - - % Check if the dimensionality of the entered data holds between - % the file types. It may be that the user entered four files of - % the same type (e.g. four data files), rather than one of each - % type as required - [is_DataOK,Data_problems] = CAP_IsDataOK(handles.TC{handles.n_datasets+1},handles.FD{handles.n_datasets+1},... 
- handles.mask{handles.n_datasets+1},handles.brain_info{handles.n_datasets+1}); - if is_DataOK - - % We increment handles.n_datasets - handles.n_datasets = handles.n_datasets + 1; - - % We can now enable the seed selection - set(handles.SeedButton,'Enable','on'); - - % Also, we can now color the button in green - set(hObject,'BackgroundColor', [101,140,196]/255); - - % If we are loading the first dataset, we convert the underlay - % to the resolution of the functional data for plotting - if handles.n_datasets == 1 - - % If we are loading the first dataset, we can get rid - % of anything else that we have so far - handles = ClearSection3(eventdata,handles); - handles = ClearSection4(eventdata,handles); - - % The brain variable now contains a good resolution - % underlay that can directly be overlapped with the - % functional data - handles.brain = CAP_V2V(handles.brain,handles.Underlay_info.dim,... - handles.Underlay_info.mat,handles.brain_info{1}.dim,handles.brain_info{1}.mat); - - elseif handles.n_datasets > 1 && handles.n_datasets < 5 && ~isempty(handles.CAP) - - % If we are loading one more dataset on top of the - % first one, then we rather want to solely clear the - % Metrics content; but we may want to keep the rest - handles = ClearSection4(eventdata,handles); - - set(handles.AssignButton,'Enable','on'); - - set(handles.CAP_TP,'Visible','on'); - set(handles.Percentile_Edit,'Visible','on'); - - elseif handles.n_datasets > 4 - errordlg('Please enter at most four different populations in the interface !'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - - handles.Log = CAP_AddToLog(handles.Log,'Data correctly loaded'); - - % If it doesn't hold, then we return an error - else - errordlg(['There is a dimensionality problem in your data: ',Data_problems]); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - - % If a different number of files is entered, then there is a problem, - % and everything is reset - else - errordlg('You did not enter the correct number of files !'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - % Else, an error is displayed and the user is prompted to enter files - else - errordlg('Cancelling data entry will not solve your problems !'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - -% Update handles structure -guidata(hObject, handles); - - - - - -%% TR Textbox Interaction - -% Executes when we go to the TR field to add the TR of the experiment -function TR_Entry_Callback(hObject, eventdata, handles) - - % If the TR takes a reasonable value, then we validate it; we enable values - % between 0.5 and 5s - if (~isempty(str2double(get(hObject,'String')))) && ... - (str2double(get(hObject,'String')) > 0.5) && ... 
- (str2double(get(hObject,'String')) <= 5) - - handles.TR = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - handles.isTROK = true; - - handles.Log = CAP_AddToLog(handles.Log,'Correct value of TR entered',{handles.TR},{'TR'}); - - % Else, the TR value is not accepted - else - set(hObject,'BackgroundColor', [204,146,146]/255); - handles.isTROK = false; - end - -guidata(hObject, handles); - - - -% Executes during creation of the TR textbox -function handles = TR_Entry_CreateFcn(hObject, eventdata, handles) - - set(hObject,'Enable','off'); - set(hObject,'String','Click to enter...'); - set(hObject,'FontAngle','italic'); - - if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','r'); - end - -guidata(hObject, handles); - - - -% Executes when clicking on the TR text space -function TR_Entry_ButtonDownFcn(hObject, eventdata, handles) - - set(hObject,'Enable','on'); - set(hObject,'String',''); - set(hObject,'FontAngle','normal'); - uicontrol(hObject); - -guidata(hObject, handles); - - - - - -%% Seed Button Controls -% We define what happens when loading seed information (SeedButton) and when -% attempting to plot it (PlotSeedButton) - -% --- Executes on button press in CheckS1POS. -function CheckS1POS_Callback(hObject, eventdata, handles) - -if get(hObject,'Value') - set(handles.CheckS1NEG,'Value',0); -else - set(handles.CheckS1POS,'Value',1); -end - -handles.SignMatrix(1,:) = [1 0]; - -rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -guidata(hObject,handles); - - - -% --- Executes on button press in CheckS1NEG. -function CheckS1NEG_Callback(hObject, eventdata, handles) - -if get(hObject,'Value') - handles.SignMatrix(1,:) = [0 1]; - set(handles.CheckS1POS,'Value',0); - rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[51,75,163]/255,'EdgeColor','none','Parent',handles.FancyCircles); -else - handles.SignMatrix(1,:) = [1 0]; - set(handles.CheckS1POS,'Value',1); - rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); -end - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -guidata(hObject,handles); - - -% --- Executes on button press in CheckS2POS. 
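% Convention used by these checkbox callbacks (inferred from the code, not
% stated in the original comments): handles.SignMatrix has one row per seed
% and two columns, where [1 0] means that frames with seed activation are
% retained and [0 1] means that frames with seed deactivation are retained;
% the CheckS<n>POS/CheckS<n>NEG pair of checkboxes toggles row n.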
-function CheckS2POS_Callback(hObject, eventdata, handles) - -if get(hObject,'Value') - set(handles.CheckS2NEG,'Value',0); -else - set(handles.CheckS2POS,'Value',1); -end - -handles.SignMatrix(2,:) = [1 0]; -rectangle('Position',[20 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -guidata(hObject,handles); - - -% --- Executes on button press in CheckS2NEG. -function CheckS2NEG_Callback(hObject, eventdata, handles) - -if get(hObject,'Value') - handles.SignMatrix(2,:) = [0 1]; - set(handles.CheckS2POS,'Value',0); - rectangle('Position',[20 0 10 10],'Curvature',[1 1],'FaceColor',[51,75,163]/255,'EdgeColor','none','Parent',handles.FancyCircles); -else - handles.SignMatrix(2,:) = [1 0]; - set(handles.CheckS2POS,'Value',1); - rectangle('Position',[20 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); -end - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -guidata(hObject,handles); - - - -% --- Executes on button press in CheckS3POS. -function CheckS3POS_Callback(hObject, eventdata, handles) - -if get(hObject,'Value') - set(handles.CheckS3NEG,'Value',0); -else - set(handles.CheckS3POS,'Value',1); -end - -handles.SignMatrix(3,:) = [1 0]; - -rectangle('Position',[40 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -guidata(hObject,handles); - -% --- Executes on button press in CheckS3NEG. 
-function CheckS3NEG_Callback(hObject, eventdata, handles) - -if get(hObject,'Value') - handles.SignMatrix(3,:) = [0 1]; - set(handles.CheckS3POS,'Value',0); - rectangle('Position',[40 0 10 10],'Curvature',[1 1],'FaceColor',[51,75,163]/255,'EdgeColor','none','Parent',handles.FancyCircles); -else - handles.SignMatrix(3,:) = [1 0]; - set(handles.CheckS3POS,'Value',1); - rectangle('Position',[40 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); -end - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -guidata(hObject,handles); - - - -% Executes when the user changes the type of seed relationship desired -% (Average, Intersection, Union of seed signals) -function SeedPopup_Callback(hObject, eventdata, handles) - -% Clears and makes the circles display reflecting seed types unvisible -handles = ResetGraphDisplay(handles.FancyCircles,handles); - -% We consider the value selected by the user for the first seed between -% '+' or '-', and update the FancyCircles plot accordingly -if get(handles.CheckS1POS,'Value') - rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); -elseif get(handles.CheckS1NEG,'Value') - rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[51,75,163]/255,'EdgeColor','none','Parent',handles.FancyCircles); -end - -% The same is done for the other seeds, if more than one has been loaded -% (or if more than two have been loaded) -if handles.n_seed > 1 - - if get(handles.CheckS2POS,'Value') - rectangle('Position',[20 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - elseif get(handles.CheckS2NEG,'Value') - rectangle('Position',[20 0 10 10],'Curvature',[1 1],'FaceColor',[51,75,163]/255,'EdgeColor','none','Parent',handles.FancyCircles); - end - - if handles.n_seed > 2 - - if get(handles.CheckS3POS,'Value') - rectangle('Position',[40 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - elseif get(handles.CheckS3NEG,'Value') - rectangle('Position',[40 0 10 10],'Curvature',[1 1],'FaceColor',[51,75,163]/255,'EdgeColor','none','Parent',handles.FancyCircles); - end - end -end - -% The second step in the plotting is to add the "arrows" that link circles -% differently depending on whether we consider a Union or an Intersection -switch get(hObject,'Value') - - % If we have the "average" pick - case 1 - % If the user selects "average" but the number of seeds entered - % exceeds 1, then by default we revert back to the "union" case, - % since the user should enter one averaged seed file to have an - % averaged seed output - if handles.n_seed > 1 - set(hObject,'Value',2); - handles.SeedType = 'Union'; - rectangle('Position',[12 -10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[11 -5 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - if handles.n_seed > 2 - rectangle('Position',[32 -10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[31 -5 8 
4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - end - else - handles.SeedType = 'Average'; - end - - % If we have the union pick - case 2 - handles.SeedType = 'Union'; - rectangle('Position',[12 -10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[11 -5 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - if handles.n_seed > 2 - rectangle('Position',[32 -10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[31 -5 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - end - - % If we have the intersection pick - case 3 - handles.SeedType = 'Intersection'; - rectangle('Position',[12 10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[11 9 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - if handles.n_seed > 2 - rectangle('Position',[32 10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[31 9 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - end -end - -% Modifies the axis parameters to have a big enough seed display -axis(handles.FancyCircles,'square'); - -switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); -end - -handles.Log = CAP_AddToLog(handles.Log,'Seed union status changed',{handles.SeedType},{'Status'}); - -guidata(hObject, handles); - - - -% Executes during object creation, after setting all properties. -function SeedPopup_CreateFcn(hObject, eventdata, handles) - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','white'); -end - -guidata(hObject, handles); - - - -% Executes when clicking on 'B. Select a seed' -function SeedButton_Callback(hObject, eventdata, handles) - -% We want to clear everything from and after this stage, since we will -% compute results with a new seed -handles = ClearSection3(eventdata,handles); -handles = ClearSection4(eventdata,handles); - -% Multiselection is on, so that as many as three files can be picked -[filename_seed,pathname_seed]=uigetfile({'*.*','All Files'},... 
- 'Select Seed File...','MultiSelect','on'); - -% Stores the type of frames to retain for each seed -handles.SignMatrix = [1 0; 1 0; 1 0]; - -% Resets the graph displays as well as the seed-related parameters -handles = ResetGraphDisplay(handles.FancyCircles,handles); - -set(handles.Seed1Text,'Visible','off'); -set(handles.Seed2Text,'Visible','off'); -set(handles.Seed3Text,'Visible','off'); - -set(handles.SeedPlusText,'Visible','off'); -set(handles.SeedMinusText,'Visible','off'); - -set(handles.CheckS1POS,'Visible','off'); -set(handles.CheckS1NEG,'Visible','off'); - -set(handles.CheckS1POS,'Value',1); -set(handles.CheckS1NEG,'Value',0); - -set(handles.CheckS2POS,'Visible','off'); -set(handles.CheckS2NEG,'Visible','off'); - -set(handles.CheckS2POS,'Value',1); -set(handles.CheckS2NEG,'Value',0); - -set(handles.CheckS3POS,'Visible','off'); -set(handles.CheckS3NEG,'Visible','off'); - -set(handles.CheckS3POS,'Value',1); -set(handles.CheckS3NEG,'Value',0); - -handles = ResetGraphDisplay(handles.SeedGraphX,handles); -handles = ResetGraphDisplay(handles.SeedGraphZ,handles); -handles = ResetGraphDisplay(handles.SeedMapX,handles); -handles = ResetGraphDisplay(handles.SeedMapZ,handles); - -% If the user has indeed entered files -if ~isequal(filename_seed,0) || ~isequal(pathname_seed,0) - % There should be three or less selected files. In this switch, we test - % for the amount of entered files - - % In the case in which only one seed file is entered ('char' type), - % we convert into an array - if strcmp(class(filename_seed),'char') - filename_seed = {filename_seed}; - end - - % If we enter that statement, it means that we have only one seed type - % across subjects (that is, no subject-specific data) - if length(filename_seed) <= 3 - - % Number of entered seeds - handles.n_seed = length(filename_seed); - - for myindex = 1:length(filename_seed) - File_seed = fullfile(pathname_seed, filename_seed{myindex}); - tmp = load(File_seed); - assignin('base','tmp', tmp); - tmp = struct2array(tmp); - - % If the file is of suitable dimensions - if islogical(tmp) && size(tmp,2) == 1 && size(tmp,1) == sum(handles.mask{1}) - - % Then we put it in the handles, enable the plotting button, and - % make the seed selection button green - handles.seed(:,myindex) = tmp; - - handles.Log = CAP_AddToLog(handles.Log,'Seed chosen',{File_seed},{'Seed file'}); - else - errordlg('The file you entered appears to be of wrong dimensions...'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - - end - - % If we survive all the above, we can see that the seed files are - % good - handles.isSeedOK = true; - - % We also enable to plot the seed results - set(handles.PlotSeedButton,'Enable','on'); - set(handles.PlotSeedButton,'Visible','on'); - - % We make the text legends visible - set(handles.S_SEED1,'Visible','on'); - - set(handles.SeedButton,'BackgroundColor', [101,140,196]/255); - - % We can now go through the next parts of the analysis, so we - % enable the related buttons - set(handles.TPSelectionButton,'Enable','on'); - set(handles.SeedMapPushButton,'Enable','on'); - - set(handles.TPSelectionButton,'Visible','on'); - set(handles.SeedMapPushButton,'Visible','on'); - - % Makes other TP selection utilities visible - set(handles.PRadio,'Visible','on'); - set(handles.TRadio,'Visible','on'); - set(handles.uibuttongroup7,'Visible','on'); - set(handles.TText,'Visible','on'); - set(handles.TMotText,'Visible','on'); - set(handles.TEdit,'Visible','on'); - set(handles.TMotEdit,'Visible','on'); - - % We 
also see the displays for entering seed specifications - set(handles.SeedPopup,'Visible','on'); - - handles.Log = CAP_AddToLog(handles.Log,'Correct amount of seeds entered',{handles.n_seed},{'Seed amount'}); - - handles.seed_display = zeros(length(handles.seed(:,1)),1); - - set(handles.CheckS1POS,'Visible','on'); - set(handles.CheckS1NEG,'Visible','on'); - CheckS1POS_Callback(handles.CheckS1POS,eventdata,handles); - - set(handles.Seed1Text,'Visible','on'); - - set(handles.SeedPlusText,'Visible','on'); - set(handles.SeedMinusText,'Visible','on'); - - % If there are more than one seed, then we allow the popup button to be - % changed for a more complex seed use choice - if handles.n_seed > 1 - set(handles.SeedPopup,'Enable','on'); - set(handles.SeedPopup,'Value',2); - handles.SeedType = 'Union'; - - % We also update the circles in the seed illustration - rectangle('Position',[12 -10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[11 -5 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - - % We fill seed_display with one scalar value across seed voxels - % per seed (to have different colors plotted in the seed choice - % graph - useless_vector = [0.25,0.75,1]; - - for se = 1:handles.n_seed - handles.seed_display = handles.seed_display + useless_vector(se)*handles.seed(:,se); - end - - set(handles.S_SEED2,'Visible','on'); - - set(handles.CheckS2POS,'Visible','on'); - set(handles.CheckS2NEG,'Visible','on'); - CheckS2POS_Callback(handles.CheckS2POS,eventdata,handles); - - set(handles.Seed2Text,'Visible','on'); - - % Same for 3 seeds - if handles.n_seed > 2 - set(handles.CheckS3POS,'Visible','on'); - set(handles.CheckS3NEG,'Visible','on'); - CheckS3POS_Callback(handles.CheckS3POS,eventdata,handles); - - set(handles.Seed3Text,'Visible','on'); - - rectangle('Position',[32 -10 6 8],'Curvature',[0.8 0.8],'Parent',handles.FancyCircles); - rectangle('Position',[31 -5 8 4],'Curvature',[0.8 0.8],'EdgeColor','none','FaceColor','w','Parent',handles.FancyCircles); - - set(handles.S_SEED3,'Visible','on'); - end - - % Entered if we have just one seed - else - set(handles.SeedPopup,'Enable','off'); - set(handles.SeedPopup,'Value',1); - handles.SeedType = 'Average'; - - handles.seed_display = handles.seed; - - set(handles.CheckS1POS,'Visible','on'); - set(handles.CheckS1NEG,'Visible','on'); - end - - rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - - if handles.n_seed > 1 - rectangle('Position',[20 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - - if handles.n_seed > 2 - rectangle('Position',[40 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - end - end - - % If the seed file is of length n_subjects, then we want a - % subject-specific seed scheme to run - elseif length(filename_seed) == handles.n_subjects{1} - - handles.isSeedSubjectSpecific = 1; - - % Then, we only allow one seed to be considered - handles.n_seed = 1; - - % For each entry (i.e., each subject data), we load the seed - for myindex = 1:length(filename_seed) - File_seed = fullfile(pathname_seed, filename_seed{myindex}); - tmp = load(File_seed); - assignin('base','tmp', tmp); - tmp = struct2array(tmp); - - % If the file is of suitable dimensions - if islogical(tmp) && size(tmp,2) == 1 && size(tmp,1) == sum(handles.mask{1}) - - % Then we put it in the handles, enable the 
plotting button, and - % make the seed selection button green - handles.seed(:,myindex) = tmp; - - handles.Log = CAP_AddToLog(handles.Log,'Seed chosen',{File_seed},{'Seed file'}); - else - errordlg('The file you entered appears to be of wrong dimensions...'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - - end - - handles.seed_display = zeros(length(handles.seed(:,1)),1); - - % If we survive all the above, we can see that the seed files are - % good - handles.isSeedOK = true; - - % We also enable to plot the seed results - set(handles.PlotSeedButton,'Enable','on'); - set(handles.PlotSeedButton,'Visible','on'); - - set(handles.S_SEED1,'Visible','on'); - - set(handles.SeedButton,'BackgroundColor', [101,140,196]/255); - - set(handles.SeedPopup,'Enable','off'); - set(handles.SeedPopup,'Value',1); - handles.SeedType = 'Average'; - set(handles.CheckS1POS,'Visible','on'); - set(handles.CheckS1NEG,'Visible','on'); - - % Creates the seed information to plot in the subject-specific case - for idx_seed = 1:size(handles.seed,2) - - handles.seed_display = handles.seed_display + handles.seed(:,idx_seed); - end - - handles.seed_display = handles.seed_display/handles.n_subjects{1}; - - % We can now go through the next parts of the analysis, so we - % enable the related buttons - set(handles.TPSelectionButton,'Enable','on'); - set(handles.SeedMapPushButton,'Enable','on'); - - set(handles.TPSelectionButton,'Visible','on'); - set(handles.SeedMapPushButton,'Visible','on'); - - set(handles.PRadio,'Visible','on'); - set(handles.TRadio,'Visible','on'); - set(handles.uibuttongroup7,'Visible','on'); - set(handles.TText,'Visible','on'); - set(handles.TMotText,'Visible','on'); - set(handles.TEdit,'Visible','on'); - set(handles.TMotEdit,'Visible','on'); - - % We also see the displays for entering seed specifications - set(handles.SeedPopup,'Visible','on'); - - handles.Log = CAP_AddToLog(handles.Log,'Correct subject-specific seed data entered',{handles.n_seed},{'Seed amount'}); - - handles.seed_display = zeros(length(handles.seed(:,1)),1); - - set(handles.CheckS1POS,'Visible','on'); - set(handles.CheckS1NEG,'Visible','on'); - CheckS1POS_Callback(handles.CheckS1POS,eventdata,handles); - - set(handles.Seed1Text,'Visible','on'); - - set(handles.SeedPlusText,'Visible','on'); - set(handles.SeedMinusText,'Visible','on'); - - rectangle('Position',[0 0 10 10],'Curvature',[1 1],'FaceColor',[150,48,48]/255,'EdgeColor','none','Parent',handles.FancyCircles); - - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - - else - errordlg('Problem with the amount of seed files entered !'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); - end - - % Updates the limits of the plot - switch handles.n_seed - case 1 - set(handles.FancyCircles,'xlim',[-10 10]); - set(handles.FancyCircles,'ylim',[-10 10]); - case 2 - set(handles.FancyCircles,'xlim',[-10 30]); - set(handles.FancyCircles,'ylim',[-10 30]); - case 3 - set(handles.FancyCircles,'xlim',[-10 50]); - set(handles.FancyCircles,'ylim',[-10 50]); - end - -else - errordlg('You did not enter a seed file !'); - handles = ClearDataButton_Callback(handles.ClearDataButton, eventdata, handles); -end - -guidata(hObject, handles); - - - -% Executes when clicking on 'Plot Seed' -function PlotSeedButton_Callback(hObject, eventdata, handles) - -% Clears the present graph content -cla(handles.SeedGraphX); -cla(handles.SeedGraphZ); - -% Plots the slices within the graph windows 
-handles.SeedGraphX = plot_slice(handles.seed_display,get(handles.TVIS_Slider,... - 'Value'),1,handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'X',get(handles.SliderX,... - 'Value'),handles.SeedGraphX); - -handles.SeedGraphZ = plot_slice(handles.seed_display,get(handles.TVIS_Slider,... - 'Value'),1,handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'Z',get(handles.SliderZ,... - 'Value'),handles.SeedGraphZ); - -% Sets the sliders to visible -set(handles.SliderX,'Visible','on'); -set(handles.SliderZ,'Visible','on'); - -% Sets the text values at the ones of the sliders -set(handles.XCoordText,'String',['X: ',sprintf('%.2f',get(handles.SliderX,'Value'))]); -set(handles.ZCoordText,'String',['Z: ',sprintf('%.2f',get(handles.SliderZ,'Value'))]); - -% Sets the visibility of the slider texts to on -set(handles.XCoordText,'Visible','on'); -set(handles.ZCoordText,'Visible','on'); - -handles.Log = CAP_AddToLog(handles.Log,'Seed plots activated'); - -guidata(hObject, handles); - - - - - -%% Seed sliders interactions -% For the below functions, the goal is to change the value of the slider -% textboxes when the sliders are moved, and to update the graph display -% accordingly. For this purpose, cla is used to clear graph content prior -% to a new display - -% Executes on slider movement. -function SliderX_Callback(hObject, eventdata, handles) - -% Clears the content of the graph -cla(handles.SeedGraphX); - -% Gets the MNI slice coordinate value associated to the new display -set(handles.XCoordText,'String',['X: ',sprintf('%.2f',get(hObject,'Value'))]); - -% Slice plotting itself; 1.5 is the color past which the display saturates -handles.SeedGraphX = plot_slice(handles.seed_display,get(handles.TVIS_Slider,... - 'Value'),1,handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'X',get(hObject,'Value'),... - handles.SeedGraphX); - -guidata(hObject, handles); - - - -% Executes during object creation, after setting all properties. -function SliderX_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - -guidata(hObject, handles); - - - -% Executes on slider movement. -function SliderZ_Callback(hObject, eventdata, handles) - -cla(handles.SeedGraphZ); -set(handles.ZCoordText,'String',['Z: ',sprintf('%.2f',get(hObject,'Value'))]); -handles.SeedGraphZ = plot_slice(handles.seed_display,get(handles.TVIS_Slider,... - 'Value'),1,handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'Z',get(hObject,'Value'),... - handles.SeedGraphZ); - -guidata(hObject, handles); - - - -% Executes during object creation, after setting all properties. -function SliderZ_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - -guidata(hObject, handles); - - - - - -%% Seed Map Computation Button -% When pressing on this button, classical seed maps are computed for the -% population of subjects chosen as the reference one in the loading part. 
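% In short (an added summary of what CAP_Compute_SeedMap appears to compute):
% for each subject, the average signal within the seed is correlated with the
% time course of every voxel, and the resulting Pearson correlation maps are
% then averaged across subjects, which is why the colorbar below spans -1 to 1.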
-% The last entered seed is used - -function SeedMapPushButton_Callback(hObject, eventdata, handles) - -% Computes seed maps for each subject and for the population, using the -% data from the chosen reference population -[~,handles.AvgSeedMap] = CAP_Compute_SeedMap(handles.TC{handles.ReferencePopulation},handles.seed,handles.isSeedSubjectSpecific); - -% Graphical displays - -% Making the plots, texts and sliders visible -set(handles.SeedMapX,'Visible','on'); -set(handles.SeedMapZ,'Visible','on'); - -set(handles.SeedMap_SliderX,'Visible','on'); -set(handles.SeedMap_SliderZ,'Visible','on'); -set(handles.SeedMapSliderX,'Visible','on'); -set(handles.SeedMapSliderZ,'Visible','on'); - -set(handles.TSeed_Slider,'Visible','on'); -set(handles.TSeed,'Visible','on'); -set(handles.ColorbarSeed,'Visible','on'); - -% Writing down the text with current MNI coordinates -set(handles.SeedMap_SliderX,'String',['X: ',sprintf('%.2f',get(handles.SeedMapSliderX,'Value'))]); -set(handles.SeedMap_SliderZ,'String',['Z: ',sprintf('%.2f',get(handles.SeedMapSliderZ,'Value'))]); - -% Clears previous plot contents (in case we want to re-plot after changing -% the seed) -cla(handles.SeedMapX); -cla(handles.SeedMapZ); - -% Plots new slices -handles.SeedMapX = plot_slice(handles.AvgSeedMap,0.25,1,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},'X',get(handles.SeedMapSliderX,'Value'),handles.SeedMapX); -handles.SeedMapZ = plot_slice(handles.AvgSeedMap,0.25,1,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},'Z',get(handles.SeedMapSliderZ,'Value'),handles.SeedMapZ); - -% Adds the colorbar for the seed maps (between -1 and 1) -handles.ColorbarSeed = Create_CAP_colorbar(-1,1,0.5,get(handles.TSeed_Slider,'Value'),'',handles.ColorbarSeed,'Horizontal','div','RdBu',1000); - -handles.Log = CAP_AddToLog(handles.Log,'Seed maps displayed'); - -guidata(hObject,handles); - - - - - -%% Slider Controls (MNI coordinates) -% We want to reload the seed images with the new parameters when changing a -% slider, so we clear the previous display, change the text summarizing the -% MNI coordinate where we stand, and plot the new image - -function SeedMapSliderX_Callback(hObject, eventdata, handles) - -% Clears graphs -cla(handles.SeedMapX); - -% Changes slider texts -set(handles.SeedMap_SliderX,'String',['X: ',sprintf('%.2f',get(hObject,'Value'))]); - -% Plots new slices -handles.SeedMapX = plot_slice(handles.AvgSeedMap,get(handles.TSeed_Slider,'Value'),1,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},'X',get(hObject,'Value'),handles.SeedMapX); - -guidata(hObject, handles); - - - -function SeedMapSliderX_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - - - -function SeedMapSliderZ_Callback(hObject, eventdata, handles) - -cla(handles.SeedMapZ); - -% Changes slider texts -set(handles.SeedMap_SliderZ,'String',['Z: ',sprintf('%.2f',get(hObject,'Value'))]); - -% Plots new slices -handles.SeedMapZ = plot_slice(handles.AvgSeedMap,get(handles.TSeed_Slider,'Value'),1,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},'Z',get(hObject,'Value'),handles.SeedMapZ); - -guidata(hObject, handles); - - - -% --- Executes during object creation, after setting all properties. 
-function SeedMapSliderZ_CreateFcn(hObject, eventdata, handles) -% hObject handle to SeedMapSliderZ (see GCBO) -% eventdata reserved - to be defined in a future version of MATLAB -% handles empty - handles not created until after all CreateFcns called - -% Hint: slider controls usually have a light gray background. -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - - - - - -%% Slider controls (visualization threshold) -% We want to replot and modify the colorbar according to the visualization -% threshold that we select (and also change the text) - -function TSeed_Slider_Callback(hObject, eventdata, handles) - -% Clears previous plot contents -cla(handles.SeedMapX); -cla(handles.SeedMapZ); - -% Plots new slices (average seed maps) -handles.SeedMapX = plot_slice(handles.AvgSeedMap,get(hObject,'Value'),1,... - handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'X',... - get(handles.SeedMapSliderX,'Value'),handles.SeedMapX); - -handles.SeedMapZ = plot_slice(handles.AvgSeedMap,get(hObject,'Value'),1,... - handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'Z',... - get(handles.SeedMapSliderZ,'Value'),handles.SeedMapZ); - -% Modifies the text -set(handles.TSeed,'String',['Tv: ',sprintf('%.2f',get(hObject,'Value'))]); - -% Clears and replots the colorbar -cla(handles.ColorbarSeed); -handles.ColorbarSeed = Create_CAP_colorbar(-1,1,0.5,get(hObject,'Value'),'',handles.ColorbarSeed,'Horizontal','div','RdBu',1000); - -guidata(hObject,handles); - - - -function TSeed_Slider_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - - - - - -%% Motion parameter entry -% In the following, we have the functions controlling the motion threshold -% (Power's framewise displacement, FD) value - -function TMotEdit_Callback(hObject, eventdata, handles) - -% If we enter a reasonable value, it is taken as a new threshold -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 0) && (str2double(get(hObject,'String')) <= 0.5) - handles.Tmot = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid motion threshold value entered',{handles.Tmot},{'Motion threshold value'}); - -% If an invalid value is entered, the threshold is set back to the -% default of 0.5 -else - set(hObject,'BackgroundColor', [203,146,146]/255); - handles.Tmot = 0.5; -end - -guidata(hObject, handles); - - - -% When clicking on the motion threshold text box -function handles = TMotEdit_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject, handles); - - - -% When the object is created -function handles = TMotEdit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','r'); -end - -guidata(hObject, handles); - - - - - -%% Frame selection parameter entry -% What comes below describes the control of the frame selection threshold -% value entered by the user - -% Threshold for selection of the frames to keep 
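% The two frame selection modes referred to above differ as follows (a rough
% summary; the actual selection logic lives in CAP_find_activity, which is
% not part of this file):
%   'Threshold'  - T is a seed activity level: frames in which the seed
%                  signal exceeds T (or lies below -T when deactivation
%                  frames are requested) are retained, hence the unitless
%                  'T [-]' label.
%   'Percentage' - T is a retention percentage: roughly the T% most strongly
%                  (de)activated frames are kept, hence the 'P [%]' label,
%                  e.g. something like
%                  keep = seed_signal >= prctile(seed_signal, 100 - T);
%                  (illustrative only; the variable names are hypothetical).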
-function TEdit_Callback(hObject, eventdata, handles) - -% If we enter a reasonable value, it is taken as the new threshold -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 0) && (str2double(get(hObject,'String')) <= 100) - handles.T = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid (de)activation threshold entered',{handles.T},{'Threshold value'}); - -% If we set something wrong again, we set the threshold value back to the -% default of 0.5 -else - set(hObject,'BackgroundColor',[203,146,146]/255); - handles.T = 0.5; -end - -guidata(hObject, handles); - - - -% When the object is created -function handles = TEdit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','white'); -end -guidata(hObject, handles); - - - -% When clicking on it -function TEdit_ButtonDownFcn(hObject,eventdata,handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - - - -%% Frame selection mode control buttons -% The below code is run when the user wishes to change between frame -% selection modes - -% If we select threshold, we update the Selmode accordingly -function TRadio_Callback(hObject, eventdata, handles) - -set(handles.TText,'String','T [-]'); -handles.SelMode = 'Threshold'; - -handles.Log = CAP_AddToLog(handles.Log,'Changed time points selection scheme',{handles.SelMode},{'Selected mode'}); - -guidata(hObject,handles); - - - -% Same for percentage -function PRadio_Callback(hObject, eventdata, handles) - -set(handles.TText,'String','P [%]'); -handles.SelMode = 'Percentage'; - -handles.Log = CAP_AddToLog(handles.Log,'Changed time points selection scheme',{handles.SelMode},{'Selected mode'}); - -guidata(hObject,handles); - - - - - -%% Time points selection control -% When clicking on the select time points button, the frames matching the -% provided thresholds for scrubbing and for frame retention are selected. -% Both activation and deactivation frames are selected. This is performed -% on all the loaded populations of subjects - -% Upon clicking on the 'Select time points' button -function TPSelectionButton_Callback(hObject, eventdata, handles) - -% Clears the current plot display (for the case of having already computed -% something before with other parameters) -cla(handles.TPViolin); - -% Performs the analysis to extract frames of activity for all loaded -% populations (done for each dataset) -for n_ds = 1:handles.n_datasets - % Xonp and Xonn contain the frames (deactivation frames have been - % switched in sign, so that deactivation is positive) - [handles.Xonp{n_ds},p,Indices,... - handles.idx_sep_seeds{n_ds}] = CAP_find_activity(handles.TC{n_ds},... - handles.seed,handles.T,handles.FD{n_ds},handles.Tmot,... 
- handles.SelMode,handles.SeedType,handles.SignMatrix,handles.isSeedSubjectSpecific); - - % Percentage of retained frames across subjects - handles.RetainedPercentage{n_ds} = p(3,:); - - % Indices of the frames that have been retained (used later for metrics - % computations) - handles.FrameIndices{n_ds} = Indices; -end - -% Enables to go to the next step of the analysis and cluster the extracted -% frames -set(handles.ClusterButton,'Enable','on'); -set(handles.CCButton,'Enable','on'); - -set(handles.ClusterButton,'Visible','on'); -set(handles.CCButton,'Visible','on'); - -% Sets related displays to visible -set(handles.CAP_Kmax,'Visible','on'); -set(handles.CAP_PPC,'Visible','on'); -set(handles.CAP_N,'Visible','on'); -set(handles.CAP_K,'Visible','on'); -set(handles.CAP_PP,'Visible','on'); -set(handles.CAP_PN,'Visible','on'); - -set(handles.KRange_Edit,'Visible','on'); -set(handles.PCC_Edit,'Visible','on'); -set(handles.ClusterEdit,'Visible','on'); -set(handles.ClusterRepEdit,'Visible','on'); -set(handles.ClusterPpEdit,'Visible','on'); -set(handles.ClusterPnEdit,'Visible','on'); - -tmp_toplot = ConcatMat(handles.RetainedPercentage,handles.n_datasets,1,handles.n_subjects,'FD'); - -% Displays the violin plot of subject scrubbing percentage for the -% reference population -[~,~,handles.TPViolin] = MakeViolin(tmp_toplot,handles.TPViolin,{' '},'Frames ret. [%]',handles.PopColor,handles.n_datasets,1); -set(handles.TPViolin,'Visible','on'); - -clear tmp_toplot - -handles.Log = CAP_AddToLog(handles.Log,'Time points selected',{['1 to ',num2str(handles.n_datasets)],handles.SelMode,handles.T,handles.Tmot},{'Datasets indices','Selection mode','Activation threshold','Motion threshold'}); - -guidata(hObject, handles); - - - - - -%% Data saving and loading -% The functions below are summoned when the user wishes to save his/her -% data into a MATLAB structure, or to load previously processed data (until -% the end of spatio-temporal selection) and attempt clustering computations -% and CAP generation - -% Save folder change function -function SaveFolderButton_Callback(hObject, eventdata, handles) - -% Selection of a directory -[dirname]=uigetdir('*.*','Please select a save directory'); -handles.savedir = dirname; - -% If the user has indeed chosen a directory, we set it as the new save -% folder -if ~isequal(dirname,0) - set(handles.SaveFolderText,'String',handles.savedir); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Save folder changed',... - {handles.savedir},... 
- {'Save folder'}); -else - errordlg('Please select a directory !'); -end - -guidata(hObject,handles); - -% Upon clicking on the 'SAVE' button, the data will be saved entirely under -% a file name partly chosen by the user and partly depending on the present -% date and time -function SaveButton_Callback(hObject, eventdata, handles) - -% Upon pressing the save button, we want to save all the important -% information into a big matlab structure -SAVED = []; - -% General information on the project -SAVED.ProjectInfo.title = handles.project_title; -SAVED.ProjectInfo.date = date; - -% Name of the files that were loaded -SAVED.SubjData.SubjFileNames = handles.SubjNames; -SAVED.SubjData.MotFileNames = handles.MotName; - -% Dimension over time and voxels of the files analyzed -SAVED.SubjData.Dimensions.TP = handles.SubjSize.TP; -SAVED.SubjData.Dimensions.VOX = handles.SubjSize.VOX; - -% Number of subjects considered -SAVED.SubjData.n_subjects = handles.n_subjects; - -% TR of the experiment -SAVED.SubjData.TR = handles.TR; - -% Information about the NIFTI files used (dimensions, mapping between real -% world and index) -SAVED.BrainData.brain_info = handles.brain_info; - -% Mask that was used on the considered data -SAVED.BrainData.mask = handles.mask; - -% Seed used for the analysis -SAVED.BrainData.seed = handles.seed; - -% Motion threshold and activation threshold used in time points selection -SAVED.TPSelData.Tmot = handles.Tmot; -SAVED.TPSelData.T = handles.T; - -% Type of frame selection used -SAVED.TPSelData.SelMode = handles.SelMode; - -% Frames that were considered in the clustering process -% SAVED.TPSelData.Act = handles.Xonp; - -% Type of seed computation chosen ('Avg','Union','Intersection') -SAVED.TPSelData.SeedType = handles.SeedType; - -% Indices, for each seed used, of the retained frames across subjects -SAVED.TPSelData.idx_sep_seeds = handles.idx_sep_seeds; - -% Percentage frames retained for the clustering -SAVED.TPSelData.PercRetained = handles.RetainedPercentage; - -% Computed seed maps (average and subject-wise) -SAVED.SeedMap.AvgMap = handles.AvgSeedMap; - -% Parameters used for clustering -SAVED.ClusterData.N = handles.n_rep; -SAVED.ClusterData.K = handles.K; -SAVED.ClusterData.Pp = handles.Pp; -SAVED.ClusterData.Pn = handles.Pn; - -% CAP data -SAVED.ClusterData.CAPs = handles.CAP; -SAVED.ClusterData.StdCAPs = handles.STDCAP; -SAVED.ClusterData.idx = handles.idx; - -% Computed metrics -SAVED.Metrics.TPM = handles.TPM; -SAVED.Metrics.Counts = handles.Counts; -SAVED.Metrics.Number = handles.Number; -SAVED.Metrics.Avg_Duration = handles.Avg_Duration; -SAVED.Metrics.Duration = handles.Duration; -SAVED.Metrics.TM = handles.TM; -SAVED.Metrics.SeedFrac = handles.sfrac; -SAVED.Metrics.From_Baseline = handles.From_Baseline; -SAVED.Metrics.To_Baseline = handles.To_Baseline; -SAVED.Metrics.Resilience = handles.Resilience; -SAVED.Metrics.Baseline_resilience = handles.Baseline_resilience; -SAVED.Metrics.Betweenness = handles.Betweenness; -SAVED.Metrics.kin = handles.kin; -SAVED.Metrics.kout = handles.kout; - -%[tmp_date,tmp_date2] = strtok(date,'-'); -%[tmp_date2,tmp_date3] = strtok(tmp_date2,'-'); -%tmp_date3 = strtok(tmp_date3,'-'); - -% Name that will be given to the saved files -% fancy_name = [handles.project_title,'_',tmp_date,'_',tmp_date2,'_',tmp_date3,'_',... -% num2str(hour(now)),'_',num2str(minute(now)),'_',... -% num2str(round(second(now)))]; - -fancy_name = [handles.project_title]; - -% Saves NIFTI files storing the CAPs in MNI space -CAPToNIFTI(handles.CAP,... 
- handles.mask{handles.ReferencePopulation},handles.brain_info{handles.ReferencePopulation},... - handles.savedir,['CAP_NIFTI_',fancy_name]); - -% Saves the different variables from the program -save(fullfile(handles.savedir,fancy_name),'SAVED','-v7.3'); - -% Adds the save process to the log -handles.Log = CAP_AddToLog(handles.Log,'Data saved'); - -% Writes a log .txt file with what has been done so far -file_ID = fopen(fullfile(handles.savedir,[fancy_name,'.txt']),'wt'); - -for i = 1:length(handles.Log) - for j = 1:length(handles.Log{i}) - fprintf(file_ID,[handles.Log{i}{j},'\n']); - end - fprintf(file_ID,'\n'); -end - -fclose(file_ID); - -% Clears the structure now that it has been saved -clear SAVED - -guidata(hObject,handles); - - - -% This function is summoned when the user wishes to load previously -% computed data and pursue an analysis -function LoadButton_Callback(hObject, eventdata, handles) - -% We want to verify -[filename1,pathname1]=uigetfile({'*.*','All Files'},... - 'Select struct file to load...','MultiSelect','on'); - - % If the user has indeed entered a file - if ~isequal(filename1,0) || ~isequal(pathname1,0) - - Loaded_File = fullfile(pathname1, filename1); - tmp = load(Loaded_File); - assignin('base','tmp', tmp); - tmp = struct2array(tmp); - end - - % Checks that the right fields do exist - try - if tmp.SubjData.n_subjects{1} > 3 &&... - ~isempty(tmp.BrainData.brain_info) &&... - sum(tmp.BrainData.mask{1}) == size(tmp.BrainData.seed,1) && ... - length(tmp.TPSelData.Act{1}) == tmp.SubjData.n_subjects{1} &&... - size(tmp.TPSelData.Act{1}{1},1) == sum(tmp.BrainData.mask{1}) - - handles.brain_info = tmp.BrainData.brain_info; - handles.Xonp = tmp.TPSelData.Act{1}; - handles.mask = tmp.BrainData.mask; - handles.SeedType = tmp.TPSelData.SeedType; - handles.idx_sep_seeds = tmp.TPSelData.idx_sep_seeds; - - % Makes the LOAD button green to indicate that loading worked - set(handles.LoadButton,'BackgroundColor',[59 113 86]/255); - - % Enables to go to the next step of the analysis and cluster the extracted - % frames - set(handles.ClusterButton,'Enable','on'); - set(handles.CCButton,'Enable','on'); - - set(handles.ClusterButton,'Visible','on'); - set(handles.CCButton,'Visible','on'); - - % Sets related displays to visible - set(handles.CAP_Kmax,'Visible','on'); - set(handles.CAP_PPC,'Visible','on'); - set(handles.CAP_N,'Visible','on'); - set(handles.CAP_K,'Visible','on'); - set(handles.CAP_PP,'Visible','on'); - set(handles.CAP_PN,'Visible','on'); - - set(handles.KRange_Edit,'Visible','on'); - set(handles.PCC_Edit,'Visible','on'); - set(handles.ClusterEdit,'Visible','on'); - set(handles.ClusterRepEdit,'Visible','on'); - set(handles.ClusterPpEdit,'Visible','on'); - set(handles.ClusterPnEdit,'Visible','on'); - end - catch - errordlg('Problem in loading the data...'); - end - -guidata(hObject,handles); - - - -% Executes when pressing on the 'CLEAR' button for data loading; supposed -% to set everything back to normal (when the window opened) -function handles = ClearDataButton_Callback(hObject, eventdata, handles) - - handles = ClearSection1(eventdata,handles); - handles = ClearSection2(eventdata,handles); - handles = ClearSection3(eventdata,handles); - handles = ClearSection4(eventdata,handles); - - % Loads and sets the brain underlay used for plotting purposes - Underlay = load_nii('Underlay.nii'); - Underlay_mat = [Underlay.hdr.hist.srow_x; Underlay.hdr.hist.srow_y; Underlay.hdr.hist.srow_z; 0 0 0 1]; - Underlay_dim = Underlay.hdr.dime.dim; - Underlay_dim = Underlay_dim(2:4); - 
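% The 4 x 4 matrix assembled just above from the srow_* rows of the NIFTI
% header is the standard voxel-to-world (sform) affine: for 0-based voxel
% indices [i j k], the corresponding MNI coordinates in mm are given by
%   xyz = Underlay_mat * [i; j; k; 1];
% and dim(2:4) of the header holds the volume size in voxels along x, y, z.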
handles.Underlay_info.dim = Underlay_dim; - handles.Underlay_info.mat = Underlay_mat; - clear Underlay - clear Underlay_dim - clear Underlay_mat - load('brain.mat'); - assignin('base','brain', brain); - handles.brain = brain; - clear brain - - handles.Log = CAP_AddToLog(handles.Log,'Data cleared'); - -guidata(hObject, handles); - - - -% Clears the content of section 1 only -function handles = ClearSection1(eventdata, handles) - -% Makes 'A. Load data' red again -set(handles.DataButton,'BackgroundColor',[204,146,146]/255); - -% Same for Save folder button -set(handles.SaveFolderButton,'BackgroundColor',[204,146,146]/255); - -% Resets the time point and voxel parameters -handles.SubjSize.TP = -inf; -handles.SubjSize.VOX = -inf; - -% Resets the TR -handles.TR = -inf; -handles.isTROK = false; - -% Resets the reference population -handles.ReferencePopulation = 1; - -handles = ProjectTitleText_CreateFcn(handles.ProjectTitleText,eventdata,handles); - -% Also resets the number of subjects variable and associated text -set(handles.Dimensionality_Text, 'String','_ frames x _ voxels (_)'); -handles.n_subjects = {}; - -% Resets the number of datasets entered to 0 -handles.n_datasets = 0; - -% Empties the data, motion, brain information and mask variables -handles.TC = {}; -handles.FD = {}; -handles.mask = {}; -handles.brain_info = {}; - -% Resets the text related to motion and data files -handles.SubjNames = {}; -handles.MotName = {}; - -% Resets the title and save folder information -handles.Log = {}; - -% Project title, by default 'Untitled' -handles.project_title = 'Untitled'; - -set(handles.LoadButton,'BackgroundColor',[51 75 163]/255); - -% Directory to which data is to be saved (initially loaded as ./SavedData) -handles.savedir = fullfile(pwd,'SavedData'); -set(handles.SaveFolderText,'String',handles.savedir); - -%%%%%%%%%% Putting the loading part (bottom) back to normal %%%%%%%%%%% - -% We also want to set the TR textbox back to its initial state -handles = TR_Entry_CreateFcn(handles.TR_Entry, eventdata, handles); - - - -% Clears the content of section 1 only -function handles = ClearSection2(eventdata, handles) - -% Puts back the seed buttons information to original state -handles.seed = []; -set(handles.SeedButton,'BackgroundColor',[204,146,146]/255); -set(handles.SeedButton,'Enable','off'); -set(handles.PlotSeedButton,'Enable','off'); -set(handles.PlotSeedButton,'Visible','off'); - -% Seed label entries set invisible -set(handles.S_SEED1,'Visible','off'); -set(handles.S_SEED2,'Visible','off'); -set(handles.S_SEED3,'Visible','off'); - -% Puts back the logical defining the type of seed information entered -handles.isSeedSubjectSpecific = 0; - -% Removes graph display for the seed -cla(handles.SeedGraphX); -cla(handles.SeedGraphZ); -set(handles.SeedGraphX,'Visible','off'); -set(handles.SeedGraphZ,'Visible','off'); -set(handles.SliderX,'Visible','off'); -set(handles.SliderZ,'Visible','off'); -set(handles.XCoordText,'Visible','off'); -set(handles.ZCoordText,'Visible','off'); - -%%%%%%%%%%%% Putting the seed map part back to normal %%%%%%%%%%%%%%%%%%% - -% Resets the variable containing the seed maps of the subjects -handles.AvgSeedMap = []; - -% Not clickable anymore -set(handles.SeedMapPushButton,'Enable','off'); -set(handles.SeedMapPushButton,'Visible','off'); - -% Resets colorbar display -handles = ResetGraphDisplay(handles.ColorbarSeed,handles); - -% Makes the slider and the text linked to slider of the seed map threshold -% back to invisible -set(handles.TSeed_Slider,'Visible','off'); 
-set(handles.TSeed,'Visible','off'); - -% Resets graphs with seed map plots -handles = ResetGraphDisplay(handles.SeedMapX,handles); -handles = ResetGraphDisplay(handles.SeedMapZ,handles); - -% Resets associated sliders -set(handles.SeedMapSliderX,'Visible','off'); -set(handles.SeedMapSliderZ,'Visible','off'); - -% Resets associated slider texts -set(handles.SeedMap_SliderX,'Visible','off'); -set(handles.SeedMap_SliderZ,'Visible','off'); - -% Resets the circles plot -handles = ResetGraphDisplay(handles.FancyCircles,handles); - -% Sets the associated text back to invisible -set(handles.SeedPlusText,'Visible','off'); -set(handles.SeedMinusText,'Visible','off'); -set(handles.Seed1Text,'Visible','off'); -set(handles.Seed2Text,'Visible','off'); -set(handles.Seed3Text,'Visible','off'); - -% Puts the seed boxes back to not visible -set(handles.CheckS1POS,'Visible','off'); -set(handles.CheckS2POS,'Visible','off'); -set(handles.CheckS3POS,'Visible','off'); -set(handles.CheckS1NEG,'Visible','off'); -set(handles.CheckS2NEG,'Visible','off'); -set(handles.CheckS3NEG,'Visible','off'); - -set(handles.TPSelectionButton,'Enable','off'); -set(handles.TPSelectionButton,'Visible','off'); - -set(handles.PRadio,'Visible','off'); -set(handles.TRadio,'Visible','off'); -set(handles.uibuttongroup7,'Visible','off'); -set(handles.TText,'Visible','off'); -set(handles.TMotText,'Visible','off'); -set(handles.TEdit,'Visible','off'); -set(handles.TMotEdit,'Visible','off'); - -% Resets the frame selection mode -handles.SelMode = 'Threshold'; - -% Invisible Seed type list -set(handles.SeedPopup,'Visible','off'); - -% Reinitializes motion and the motion box -handles.Tmot = 0.5; -handles = TMotEdit_CreateFcn(handles.TMotEdit,eventdata,handles); - -% Reinitializes frame selection threshold and the linked box -handles.T = 0.5; -handles = TEdit_CreateFcn(handles.TEdit,eventdata,handles); - -% Resets the frame and percentage retention variables -handles.Xonp = {}; -handles.Xonn = {}; -handles.RetainedPercentage = {}; -handles.FrameIndices = {}; - -% Resets the variables indexing seed selection time points retained -handles.idx_sep_seeds = {}; -handles.sfrac = []; - -% Resets the violin plot with percentage retained frames -handles = ResetGraphDisplay(handles.TPViolin,handles); - - - -% Clears the content of section 3 only -function handles = ClearSection3(eventdata, handles) - -set(handles.ClusterButton,'Enable','off'); -set(handles.CCButton,'Enable','off'); -set(handles.AssignButton,'Enable','off'); -set(handles.AssignButton,'Visible','off'); - -set(handles.CCPlot,'Visible','off'); -cla(handles.CCPlot); - -set(handles.CAP_TP,'Visible','off'); -set(handles.Percentile_Edit,'Visible','off'); - -set(handles.CAP_Kmax,'Visible','off'); -set(handles.CAP_PPC,'Visible','off'); -set(handles.CAP_N,'Visible','off'); -set(handles.CAP_K,'Visible','off'); -set(handles.CAP_PP,'Visible','off'); -set(handles.CAP_PN,'Visible','off'); - -set(handles.KRange_Edit,'Visible','off'); -set(handles.PCC_Edit,'Visible','off'); -set(handles.ClusterEdit,'Visible','off'); -set(handles.ClusterRepEdit,'Visible','off'); -set(handles.ClusterPpEdit,'Visible','off'); -set(handles.ClusterPnEdit,'Visible','off'); - -% Resets the consensus clustering parameter input boxes -handles = KRange_Edit_CreateFcn(handles.KRange_Edit,eventdata,handles); -handles = PCC_Edit_CreateFcn(handles.PCC_Edit,eventdata,handles); - -% Resets the parameter input boxes -handles = ClusterEdit_CreateFcn(handles.ClusterEdit,eventdata,handles); -handles = 
ClusterRepEdit_CreateFcn(handles.ClusterRepEdit,eventdata,handles); -handles = ClusterPpEdit_CreateFcn(handles.ClusterPpEdit,eventdata,handles); -handles = ClusterPnEdit_CreateFcn(handles.ClusterPnEdit,eventdata,handles); -handles = Percentile_Edit_CreateFcn(handles.Percentile_Edit,eventdata,handles); - -% Resets the consensus clustering parameters themselves -handles.Kmax = 12; -handles.PCC = 80; - -set(handles.CCButton,'Visible','off'); -set(handles.ClusterButton,'Visible','off'); - -set(handles.PIE_S1,'Visible','off'); -set(handles.PIE_S2,'Visible','off'); -set(handles.PIE_S3,'Visible','off'); -set(handles.PIE_S1S2,'Visible','off'); -set(handles.PIE_S2S3,'Visible','off'); -set(handles.PIE_S1S3,'Visible','off'); -set(handles.PIE_S1S2S3,'Visible','off'); - -% Resets the parameters themselves -handles.K = 5; -handles.n_rep = 20; -handles.Pp = 100; -handles.Pn = 100; -handles.percentile = 5; - -% Resets the CAP parameters (CAPs, standard deviation within CAPs and -% indices of the CAPs to which all retained frames were assigned) -handles.CAP = []; -handles.STDCAP = []; -handles.idx = {}; - -% Resets the graph display of the CAP colorbar -handles = ResetGraphDisplay(handles.ColorbarCAP,handles); - -% Reset all graph displays for the CAPs -tmpX = {handles.CAP1X,handles.CAP2X,handles.CAP3X,handles.CAP4X,handles.CAP5X}; -tmpY = {handles.CAP1Y,handles.CAP2Y,handles.CAP3Y,handles.CAP4Y,handles.CAP5Y}; -tmpZ = {handles.CAP1Z,handles.CAP2Z,handles.CAP3Z,handles.CAP4Z,handles.CAP5Z}; -tmpF = {handles.CAP1_Frames,handles.CAP2_Frames,handles.CAP3_Frames,handles.CAP4_Frames,handles.CAP5_Frames}; - - -for i_CAP = 1:5 - set(tmpF{i_CAP},'Visible','off'); - handles = ResetGraphDisplay(tmpX{i_CAP},handles); - handles = ResetGraphDisplay(tmpY{i_CAP},handles); - handles = ResetGraphDisplay(tmpZ{i_CAP},handles); -end - -% Resets the sliders and the textboxes for the CAPs -set(handles.CAP_SliderX,'Visible','off'); -set(handles.CAP_SliderY,'Visible','off'); -set(handles.CAP_SliderZ,'Visible','off'); -set(handles.CAP_XC,'Visible','off'); -set(handles.CAP_YC,'Visible','off'); -set(handles.CAP_ZC,'Visible','off'); - -% Resets the slider and textbox for the CAPs visualization threshold -set(handles.TVIS_Slider,'Visible','off'); -set(handles.TVIS,'Visible','off'); - -% Resets the pie charts -handles = ResetGraphDisplay(handles.pie1,handles); -handles = ResetGraphDisplay(handles.pie2,handles); -handles = ResetGraphDisplay(handles.pie3,handles); -handles = ResetGraphDisplay(handles.pie4,handles); -handles = ResetGraphDisplay(handles.pie5,handles); - -handles = ResetGraphDisplay(handles.ColorbarSimMat,handles); -handles = ResetGraphDisplay(handles.CAP_Mat,handles); - - - -% Clears the content of section 4 only -function handles = ClearSection4(eventdata, handles) - -set(handles.MetricsButton,'Enable','off'); -set(handles.MetricsButton,'Visible','off'); - -% Resets the metrics variables -handles.TPM = {}; -handles.Counts = {}; -handles.Number = {}; -handles.Avg_Duration = {}; -handles.Duration = {}; -handles.TM = {}; -handles.TPMCum = {}; - -% Set the sliding lists of subjects invisible again -set(handles.SubjectMenuMetrics,'Visible','off'); -set(handles.StateMenu,'Visible','off'); - -% Resets the colorbars from the metrics part -handles = ResetGraphDisplay(handles.ColorbarTransMat,handles); - -% Resets all the graphs from the metrics part -handles = ResetGraphDisplay(handles.TMGraph,handles); -handles = ResetGraphDisplay(handles.TM_Subject,handles); -handles = ResetGraphDisplay(handles.DynStates,handles); -handles = 
ResetGraphDisplay(handles.CumStates,handles); -handles = ResetGraphDisplay(handles.ViolinCounts,handles); -handles = ResetGraphDisplay(handles.ViolinCountsFrac,handles); -handles = ResetGraphDisplay(handles.ViolinNumber,handles); -handles = ResetGraphDisplay(handles.ViolinDuration,handles); - -% Removes all the labels linked to Metrics displays -set(handles.DS_Scrubbed,'Visible','off'); -set(handles.DS_NotSelected,'Visible','off'); -set(handles.DS_Unassigned,'Visible','off'); - -tmp = {handles.DS_CAP1,handles.DS_CAP2,handles.DS_CAP3,handles.DS_CAP4,... - handles.DS_CAP5,handles.DS_CAP6,handles.DS_CAP7,handles.DS_CAP8,... - handles.DS_CAP9,handles.DS_CAP10,handles.DS_CAP11,handles.DS_CAP12}; - -for i = 1:length(tmp) - set(tmp{i},'Visible','off'); -end - -clear tmp - -tmp = {handles.V_POP1,handles.V_POP2,handles.V_POP3,handles.V_POP4}; - -for i = 1:length(tmp) - set(tmp{i},'Visible','off'); -end - -clear tmp - - -% The following functions enable to modify the text of the project title -function ProjectTitleText_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - -function handles = ProjectTitleText_CreateFcn(hObject, eventdata, handles) - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','white'); -end - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function ProjectTitleText_Callback(hObject, eventdata, handles) - -% If we have entered a valid string, then we name the project as such -if ~isempty((get(hObject,'String'))) - handles.project_title = get(hObject,'String'); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid project title entered',{handles.project_title},{'New project title'}); - -% If we haven't entered anything, the project is just named 'untitled' -else - handles.project_title = 'Untitled'; - set(hObject,'BackgroundColor',[204,146,146]/255); -end - -guidata(hObject, handles); - - - - - -%% Consensus clustering functions to compute optimal K -% The functions below enable to determine the optimal values of cluster -% number into which to disentangle the CAPs data - -% Executes when pressing on the button to run consensus clustering -function CCButton_Callback(hObject, eventdata, handles) - - % Computes the consensus results - [Consensus] = CAP_ConsensusClustering(handles.Xonp{handles.ReferencePopulation},2:handles.Kmax,'items',handles.PCC/100,handles.n_rep,'correlation'); - - % Calculates the quality metrics - [~,Lorena] = ComputeClusteringQuality(Consensus,2:handles.Kmax); - - set(handles.CCPlot,'Visible','on'); - tmp_plot = bar(2:handles.Kmax,1-Lorena,'Parent',handles.CCPlot); - xlabel(get(tmp_plot(1),'Parent'),'Cluster number K'); - ylabel(get(tmp_plot(1),'Parent'),'Stability'); - xlim(get(tmp_plot(1),'Parent'),[2-0.6,handles.Kmax+0.6]); - ylim(get(tmp_plot(1),'Parent'),[0,1]); - custom_cm = cbrewer('seq','Reds',25); - colormap(handles.CCPlot,custom_cm(6:25,:)); - -guidata(hObject,handles); - - - -function KRange_Edit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 1) && (str2double(get(hObject,'String')) <= 12) - handles.Kmax = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = 
CAP_AddToLog(handles.Log,'Valid number of Kmax chosen',{handles.Kmax},{'Max cluster number'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.Kmax = 12; -end - -guidata(hObject, handles); - - - -function handles = KRange_Edit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function KRange_Edit_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - -function PCC_Edit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 50) && (str2double(get(hObject,'String')) <= 100) - handles.PCC = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid percentage of items chosen',{handles.PCC},{'Percentage items to cluster'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.PCC = 80; -end - -guidata(hObject, handles); - - - -function handles = PCC_Edit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function PCC_Edit_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - - - -%% Editing of all the CAP generation parameters - -% Number of clusters -function ClusterEdit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 1) && (str2double(get(hObject,'String')) <= 12) - handles.K = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid number of clusters chosen',{handles.K},{'Number of clusters'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.K = 5; -end - -guidata(hObject, handles); - - - -function handles = ClusterEdit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function ClusterEdit_ButtonDownFcn(hObject,eventdata,handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - -% Number of k-means repetitions -function ClusterRepEdit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 0) && (str2double(get(hObject,'String')) <= 50) - handles.n_rep = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid number of replicates chosen',{handles.n_rep},{'Number of replicates'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.n_rep = 20; -end - -guidata(hObject, handles); - - - -function handles = ClusterRepEdit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function ClusterRepEdit_ButtonDownFcn(hObject, eventdata, handles) - 
-set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - -% Percentage of positive-valued voxels to keep -function ClusterPpEdit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 0) && (str2double(get(hObject,'String')) <= 100) - handles.Pp = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid percentage positive voxels chosen',{handles.Pp},{'Percentage positive voxels'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.Pp = 20; -end - -guidata(hObject, handles); - - - -function handles = ClusterPpEdit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function ClusterPpEdit_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - -% Percentage of negative-valued voxels to keep -function ClusterPnEdit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) > 0) && (str2double(get(hObject,'String')) <= 100) - handles.Pn = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid percentage negative voxels chosen',{handles.Pn},{'Percentage negative voxels'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.Pn = 20; -end - -guidata(hObject, handles); - - - -function handles = ClusterPnEdit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -guidata(hObject, handles); - - - -function ClusterPnEdit_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject,handles); - - - - - -%% Clustering control -% When pressing on the 'Cluster' button, we want to run clustering for the -% specified mode (Activation frames, Deactivation frames, or both types of -% frames together), using the previously declared parameters - -% Upon clicking on 'Cluster' -function ClusterButton_Callback(hObject, eventdata, handles) - -% We perform clustering -[handles.CAP,~,handles.STDCAP,handles.idx{handles.ReferencePopulation},... - handles.CorrDist,handles.sfrac] = Run_Clustering(cell2mat(handles.Xonp{handles.ReferencePopulation}),... - handles.K,handles.mask{handles.ReferencePopulation},handles.brain_info{handles.ReferencePopulation},... 
- handles.Pp,handles.Pn,handles.n_rep,handles.idx_sep_seeds{handles.ReferencePopulation},handles.SeedType); - -% Makes the sliders visible, and the related text too (CAP MNI coordinates) -set(handles.CAP_SliderX,'Visible','on'); -set(handles.CAP_SliderY,'Visible','on'); -set(handles.CAP_SliderZ,'Visible','on'); -set(handles.CAP_XC,'Visible','on'); -set(handles.CAP_YC,'Visible','on'); -set(handles.CAP_ZC,'Visible','on'); -set(handles.CAP_XC,'String',['X: ',sprintf('%.2f',get(handles.CAP_SliderX,'Value'))]); -set(handles.CAP_YC,'String',['Y: ',sprintf('%.2f',get(handles.CAP_SliderY,'Value'))]); -set(handles.CAP_ZC,'String',['Z: ',sprintf('%.2f',get(handles.CAP_SliderZ,'Value'))]); - -% Computation of the similarity -SimMat = corr(handles.CAP',handles.CAP'); -SimMat(isnan(SimMat))=0; - -% Graph set visible, and plotting -handles = ResetGraphDisplay(handles.CAP_Mat,handles); -set(handles.CAP_Mat,'Visible','on'); -imagesc(SimMat,'Parent',handles.CAP_Mat); - -tmp_cb2 = cbrewer('div','RdBu',1000); - -colormap(handles.CAP_Mat,flipud(tmp_cb2)); - -% Correlation ranges from -1 to 1, so this is what we make the graph -% colorbar vary within. We also make the graph square and remove the axes -caxis(handles.CAP_Mat,[-1 1]); -axis(handles.CAP_Mat,'square','on'); -axis(handles.CAP_Mat,'off'); - -% Addition of the colorbar just below -set(handles.ColorbarSimMat,'Visible','on'); -handles.ColorbarSimMat = Create_CAP_colorbar(-1,1,0.5,0,'',... - handles.ColorbarSimMat,'Vertical','div','RdBu',1000); - -% If using the 'Intersection' option... -if strcmp(handles.SeedType,'Intersection') - - handles.sfrac - - % Custom colormap - custom_cm = 1/255*[211,36,36;11,170,65;51,75,163;255,255,180;186,59,204;58,221,221;242,242,242]; - - % Graph displays are stored in a common tmp_sfrac cell array - tmp_sfrac = {handles.pie1,handles.pie2,handles.pie3,handles.pie4,... 
- handles.pie5}; - - % The pie charts for each cluster are created - for cc = 1:min([handles.K,5]) - - % Pie charts - set(tmp_sfrac{cc},'Visible','on'); - for tt = 1:size(handles.sfrac,3) - lab{tt} = ''; - end - - pie(tmp_sfrac{cc},realmin*ones(size(handles.sfrac,3),1)+squeeze(mean(handles.sfrac(:,cc,:),1)),lab); - caxis(tmp_sfrac{cc},[1,7]); - - switch handles.n_seed - case 1 - errordlg('You managed the impossible, congratulations!'); - case 2 - colormap(tmp_sfrac{cc},(custom_cm)); - set(handles.PIE_S1,'Visible','on'); - set(handles.PIE_S2,'Visible','on'); - set(handles.PIE_S1S2,'Visible','on'); - case 3 - colormap(tmp_sfrac{cc},(custom_cm)); - set(handles.PIE_S1,'Visible','on'); - set(handles.PIE_S2,'Visible','on'); - set(handles.PIE_S3,'Visible','on'); - set(handles.PIE_S2S3,'Visible','on'); - set(handles.PIE_S1S2,'Visible','on'); - set(handles.PIE_S1S3,'Visible','on'); - set(handles.PIE_S1S2S3,'Visible','on'); - end - end -end - -% Same for the slider for the visualization threshold -set(handles.TVIS,'Visible','on'); -set(handles.TVIS_Slider,'Visible','on'); -set(handles.TVIS,'String',['Tv: ',sprintf('%.2f',get(handles.TVIS_Slider,'Value'))]); - -% Makes the colorbar for the CAPs visible -handles.ColorbarCAP = Create_CAP_colorbar(-1.5,1.5,0.5,get(handles.TVIS_Slider,'Value'),'',handles.ColorbarCAP,'Horizontal','div','RdBu',1000); -set(handles.ColorbarCAP,'Visible','on'); - -% Concatenates all CAP information into metavariables for easier subsequent -% changes -tmpX = {handles.CAP1X,handles.CAP2X,handles.CAP3X,handles.CAP4X,handles.CAP5X}; -tmpY = {handles.CAP1Y,handles.CAP2Y,handles.CAP3Y,handles.CAP4Y,handles.CAP5Y}; -tmpZ = {handles.CAP1Z,handles.CAP2Z,handles.CAP3Z,handles.CAP4Z,handles.CAP5Z}; -tmpF = {handles.CAP1_Frames,handles.CAP2_Frames,handles.CAP3_Frames,handles.CAP4_Frames,handles.CAP5_Frames}; - -% For each CAP... -for i_CAP = 1:min([handles.K,5]) - - % Clears the display for each dimension - cla(tmpX{i_CAP}); - cla(tmpY{i_CAP}); - cla(tmpZ{i_CAP}); - - % Plots the new slice for each dimension - tmpX{i_CAP} = plot_slice(handles.CAP(i_CAP,:),... - get(handles.TVIS_Slider,'Value'),1.5,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},... - 'X',get(handles.CAP_SliderX,'Value'),tmpX{i_CAP}); - - tmpY{i_CAP} = plot_slice(handles.CAP(i_CAP,:),... - get(handles.TVIS_Slider,'Value'),1.5,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},... - 'Y',get(handles.CAP_SliderY,'Value'),tmpY{i_CAP}); - - tmpZ{i_CAP} = plot_slice(handles.CAP(i_CAP,:),... - get(handles.TVIS_Slider,'Value'),1.5,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},... - 'Z',get(handles.CAP_SliderZ,'Value'),tmpZ{i_CAP}); - - % Sets the frame percentage text visible and at the right value (number - % of frames from a CAP/total frame number, and then percentage that it - % stands for) - set(tmpF{i_CAP},'Visible','on'); - set(tmpF{i_CAP},'String',{[num2str(sum(handles.idx{handles.ReferencePopulation}==i_CAP)),'/',... - num2str(size(handles.idx{handles.ReferencePopulation},1))],[sprintf('%.2f',... 
- sum(handles.idx{handles.ReferencePopulation}==i_CAP)/size(handles.idx{handles.ReferencePopulation},1)*100),' [%]']}); -end - -% Fills that subject menu with the subjects from the reference population -handles = FillSubjectList(handles.SubjectMenuMetrics,handles); - -% Also enables and fills the state menu -handles = FillStateList(handles.StateMenu,handles); - -% Enables the Metrics button for the next part of the analysis if we -% only deal with one dataset -if handles.n_datasets == 1 - set(handles.MetricsButton,'Enable','on'); - set(handles.MetricsButton,'Visible','on'); - -% Else, we enable the assignment before enabling the metrics computation -elseif handles.n_datasets > 1 - set(handles.AssignButton,'Enable','on'); - set(handles.AssignButton,'Visible','on'); - - set(handles.CAP_TP,'Visible','on'); - set(handles.Percentile_Edit,'Visible','on'); -end - -handles.Log = CAP_AddToLog(handles.Log,'Clustering performed',... - {handles.ReferencePopulation,handles.K,handles.n_rep,handles.Pp,... - handles.Pn},{'Reference group index',... - 'Number of clusters','Number of replicates',... - 'Percentage positive voxels','Percentage negative voxels'}); - -guidata(hObject, handles); - - - - - -%% Frame assignment control -% This button is only enabled after clustering has been performed on the -% reference population. It assigns frames from the other populations to the -% computed CAPs - -% Happens upon clicking on the 'Assign' buttons -function AssignButton_Callback(hObject, eventdata, handles) - -tmp_notref = []; -tmp_computedTPsel = []; - -% For each non-reference dataset... -for n_ds = 1:handles.n_datasets - if n_ds ~= handles.ReferencePopulation - - tmp_notref = [tmp_notref,n_ds]; - - % Attempts to access the frames for a given dataset; if it fails, it - % means we must compute activity. If it works, we do nothing because - % activity has already been computed - try - justtotest = handles.Xonp{n_ds}; - catch - - [handles.Xonp{n_ds},p,handles.FrameIndices{n_ds},handles.idx_sep_seeds{n_ds}] = ... - CAP_find_activity(handles.TC{n_ds},handles.seed,handles.T,handles.FD{n_ds},handles.Tmot,handles.SelMode,handles.SeedType); - - handles.RetainedPercentage{n_ds} = p(4:5,:); - - tmp_computedTPsel = [tmp_computedTPsel,n_ds]; - end - - try - handles.idx{n_ds} = CAP_AssignFrames(handles.CAP,cell2mat(handles.Xonp{n_ds}),handles.CorrDist,handles.percentile)'; - catch - errordlg('You computed CAPs with a different CAP type compared to the one used now; please use the same CAP type !'); - end - end -end - -% We then enable the computation of metrics -set(handles.MetricsButton,'Enable','on'); -set(handles.MetricsButton,'Visible','on'); - -handles.Log = CAP_AddToLog(handles.Log,'Frame assignment performed',... - {handles.ReferencePopulation,num2str(tmp_computedTPsel),... - num2str(tmp_notref)},{'Reference group index','Group indices for which frames were computed',... - 'Group indices for which frames were assigned'}); - -guidata(hObject, handles); - - - - - -%% Parameter control: percentile to use for frame assignment -% This asks for the percentile to use in frame assignment (i.e. 
the -% threshold of correlation below which frames are left unassigned) - -function Percentile_Edit_Callback(hObject, eventdata, handles) - -if ~isempty(str2double(get(hObject,'String'))) && (str2double(get(hObject,'String')) <= 100) - handles.percentile = str2double(get(hObject,'String')); - set(hObject,'BackgroundColor', [101,140,196]/255); - - handles.Log = CAP_AddToLog(handles.Log,'Valid percentile chosen',{handles.percentile},{'Percentile'}); - -else - set(hObject,'BackgroundColor',[204,146,146]/255); - handles.percentile = 5; -end - -guidata(hObject, handles); - - - -function handles = Percentile_Edit_CreateFcn(hObject, eventdata, handles) - -set(hObject,'Enable','off'); -set(hObject,'String','Click to enter...'); -set(hObject,'FontAngle','italic'); - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','white'); -end - - - -function Percentile_Edit_ButtonDownFcn(hObject, eventdata, handles) - -set(hObject,'Enable','on'); -set(hObject,'String',''); -set(hObject,'FontAngle','normal'); -uicontrol(hObject); - -guidata(hObject, handles); - - - - - -%% Sliders for CAP visualization (MNI coordinates) -% When changing along a slider, we want to update the graphs and the text of -% the MNI coordinate below the slider - -% X dimension slider -function CAP_SliderX_Callback(hObject, eventdata, handles) - -set(handles.CAP_XC,'String',['X: ',sprintf('%.2f',get(hObject,'Value'))]); -tmp_struct = {handles.CAP1X,handles.CAP2X,handles.CAP3X,handles.CAP4X,handles.CAP5X}; - -for i_CAP = 1:min([handles.K,5]) - cla(tmp_struct{i_CAP}); - tmp_struct{i_CAP} = plot_slice(handles.CAP(i_CAP,:),get(handles.TVIS_Slider,'Value'),... - 1.5,handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'X',get(hObject,'Value'),tmp_struct{i_CAP}); -end - -guidata(hObject, handles); - - - -function CAP_SliderX_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - -guidata(hObject,handles); - - - -% Y dimension slider -function CAP_SliderY_Callback(hObject, eventdata, handles) - -set(handles.CAP_YC,'String',['Y: ',sprintf('%.2f',get(hObject,'Value'))]); -tmp_struct = {handles.CAP1Y,handles.CAP2Y,handles.CAP3Y,handles.CAP4Y,handles.CAP5Y}; - -for i_CAP = 1:min([handles.K,5]) - cla(tmp_struct{i_CAP}); - tmp_struct{i_CAP} = plot_slice(handles.CAP(i_CAP,:),get(handles.TVIS_Slider,'Value'),... - 1.5,handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'Y',get(hObject,'Value'),tmp_struct{i_CAP}); -end - -guidata(hObject,handles); - - - -function CAP_SliderY_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - -guidata(hObject,handles); - - - -% Z dimension slider -function CAP_SliderZ_Callback(hObject, eventdata, handles) - -set(handles.CAP_ZC,'String',['Z: ',sprintf('%.2f',get(hObject,'Value'))]); -tmp_struct = {handles.CAP1Z,handles.CAP2Z,handles.CAP3Z,handles.CAP4Z,handles.CAP5Z}; - -for i_CAP = 1:min([handles.K,5]) - - cla(tmp_struct{i_CAP}); - tmp_struct{i_CAP} = plot_slice(handles.CAP(i_CAP,:),get(handles.TVIS_Slider,'Value'),... 
- 1.5,handles.mask{handles.ReferencePopulation},handles.brain,handles.brain_info{handles.ReferencePopulation},'Z',get(hObject,'Value'),tmp_struct{i_CAP}); -end - -guidata(hObject,handles); - - - -function CAP_SliderZ_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - -guidata(hObject,handles); - - - - - -%% Sliders for threshold visualization (CAP analysis) -% Again, we want to update the slices and the text if we change those -% sliders - -function TVIS_Slider_Callback(hObject, eventdata, handles) - -% The text is changed -set(handles.TVIS,'String',['Tv: ',sprintf('%.2f',get(hObject,'Value'))]); - -% The colorbar graph is modified to suit the new threshold value -cla(handles.ColorbarCAP); -handles.ColorbarCAP = Create_CAP_colorbar(-1.5,1.5,0.5,get(hObject,'Value'),'',handles.ColorbarCAP,'Horizontal','div','RdBu',1000); - -% The brain slices are replotted -tmpX = {handles.CAP1X,handles.CAP2X,handles.CAP3X,handles.CAP4X,handles.CAP5X}; -tmpY = {handles.CAP1Y,handles.CAP2Y,handles.CAP3Y,handles.CAP4Y,handles.CAP5Y}; -tmpZ = {handles.CAP1Z,handles.CAP2Z,handles.CAP3Z,handles.CAP4Z,handles.CAP5Z}; - -for i_CAP = 1:min([handles.K,5]) - - cla(tmpX{i_CAP}); - cla(tmpY{i_CAP}); - cla(tmpZ{i_CAP}); - - tmpX{i_CAP} = plot_slice(handles.CAP(i_CAP,:),get(hObject,'Value'),1.5,... - handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'X',get(handles.CAP_SliderX,'Value'),tmpX{i_CAP}); - - tmpY{i_CAP} = plot_slice(handles.CAP(i_CAP,:),get(hObject,'Value'),1.5,... - handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'Y',get(handles.CAP_SliderY,'Value'),tmpY{i_CAP}); - - tmpZ{i_CAP} = plot_slice(handles.CAP(i_CAP,:),get(hObject,'Value'),1.5,... - handles.mask{handles.ReferencePopulation},handles.brain,... - handles.brain_info{handles.ReferencePopulation},'Z',get(handles.CAP_SliderZ,'Value'),tmpZ{i_CAP}); -end - -guidata(hObject,handles); - - - -function TVIS_Slider_CreateFcn(hObject, eventdata, handles) - -if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor',[.9 .9 .9]); -end - - - - - -%% Metrics computation control -% When pressing on the 'Compute metrics' button, the different metrics for -% CAP analysis are computed, including: -% - Similarity between CAPs -% - Transition probabilities from a state to the other (average + subject) -% - Sequence of states for each subject -% - Cumulative state sequence for all subjects -% - Counts (number of frames in a state) -% - Number of times entering a state, and duration spent in a state - -function MetricsButton_Callback(hObject, eventdata, handles) - -% All the metrics are computed for all the datasets -for n_ds = 1:handles.n_datasets - - try - [handles.TPM{n_ds},handles.Counts{n_ds},... - handles.Number{n_ds},handles.Avg_Duration{n_ds},... - handles.Duration{n_ds},handles.TM{n_ds},... - handles.From_Baseline{n_ds},handles.To_Baseline{n_ds},... - handles.Baseline_resilience{n_ds},handles.Resilience{n_ds},... - handles.Betweenness{n_ds},handles.kin{n_ds},handles.kout{n_ds}] =... - Compute_Metrics_simpler(handles.idx{n_ds},handles.FrameIndices{n_ds}.kept.active,... - handles.FrameIndices{n_ds}.scrubbedandactive,... 
- handles.K,handles.TR); - catch - - errordlg('You tried computing metrics using parameter values different from the ones that were employed to generate CAPs; please check !'); - end -end - -handles.Log = CAP_AddToLog(handles.Log,'Metrics computed',... - {handles.n_datasets,handles.K,handles.TR},... - {'Number of datasets','Number of clusters','TR'}); - -tmp_cb = cbrewer('seq','Greys',1000); - -% 2. Transition matrix for all subjects together - -tmp_toplot = squeeze(mean(handles.TM{handles.ReferencePopulation},3)); -tmp_toplot = tmp_toplot(3:end-1,3:end-1); - -% Make graph visible and plotting -handles = ResetGraphDisplay(handles.TMGraph,handles); -set(handles.TMGraph,'Visible','on'); -imagesc(tmp_toplot,'Parent',handles.TMGraph); - -colormap(handles.TMGraph,flipud(tmp_cb)); - -clear tmp_toplot - -% Arbitrary setting of probability scale from 0 to 0.03 -caxis(handles.TMGraph,[0 0.03]); -axis(handles.TMGraph,'square','on'); -axis(handles.TMGraph,'off'); - -% 3. Transition matrix for one subject - -tmp_toplot = squeeze(handles.TM{handles.ReferencePopulation}(:,:,get(handles.SubjectMenuMetrics,'Value'))); -tmp_toplot = tmp_toplot(3:end-1,3:end-1); - -% makes graph visible and plots the information given by the Subject popup -handles = ResetGraphDisplay(handles.TM_Subject,handles); -set(handles.TM_Subject,'Visible','on'); -imagesc(tmp_toplot,... - 'Parent',handles.TM_Subject); - -colormap(handles.TM_Subject,flipud(tmp_cb)); - -clear tmp_toplot - -% Same setting for the axes as for the average graph -caxis(handles.TM_Subject,[0 0.03]); -axis(handles.TM_Subject,'square','on'); -axis(handles.TM_Subject,'off'); - -% We then create the colorbar for both cases -set(handles.ColorbarTransMat,'Visible','on'); -handles.ColorbarTransMat = Create_CAP_colorbar(0,0.03,0.01,0,'',... - handles.ColorbarTransMat,'Vertical','seq','Greys',1000); - -% Makes the subject menu visible -set(handles.SubjectMenuMetrics,'Visible','on'); - -% 4. Dynamic state plotting - -% Makes the graph visible -handles = ResetGraphDisplay(handles.DynStates,handles); -set(handles.DynStates,'Visible','on'); - -% Concatenates information from the different datasets -tmp_toplot = []; - -for i = 1:handles.n_datasets - tmp_toplot = [tmp_toplot; handles.TPM{i}; 0*ones(5,handles.SubjSize.TP)]; -end -tmp_toplot = tmp_toplot(1:end-5,:); - -custom_cm = cbrewer('qual','Set1',handles.K+1); -custom_cm = [0.05,0.05,0.05;1,1,1;custom_cm]; - -% If the TR has been properly entered, the x-axis is time; else, it depicts -% time index. In any case, we plot the states -if handles.isTROK - imagesc(tmp_toplot,'Parent',handles.DynStates); - colormap(handles.DynStates,(custom_cm)); - xlabel(handles.DynStates,'Time [s]'); -else - imagesc(tmp_toplot,'Parent',handles.DynStates); - colormap(handles.DynStates,(custom_cm)); - xlabel(handles.DynStates,'Time index [-]'); -end - -ylabel(handles.DynStates,'Subjects [-]'); -axis(handles.DynStates,'off'); -caxis(handles.DynStates,[-1,handles.K+1]); - -clear tmp_toplot - -% 5. Cumulative state distributions - -% Makes the graph visible -handles = ResetGraphDisplay(handles.CumStates,handles); -set(handles.CumStates,'Visible','on'); - -for i = 1:handles.n_datasets - % Cumulative distribution for the state that we want to be displayed (i.e. 
- % the state from the popup menu) - handles.TPMCum{i} = cumsum(handles.TPM{i} == get(handles.StateMenu,'Value'),2); - - % Average of the considered state across subjects - tmp_TPMCum{i} = mean(handles.TPMCum{i},1); -end - -% Similarly as above, we plot time if we have a valid TR; else, we plot -% 'time index' -if handles.isTROK == false - - for i = 1:handles.n_datasets - % We first plot each subject curve - for j = 1:size(handles.TPMCum{i},1) - plot(1:size(handles.TPM{i},2),handles.TPMCum{i}(j,:),'Color',handles.PopColor{1}(i,:),... - 'Parent',handles.CumStates); - hold(handles.CumStates,'on'); - end - end - - for i = 1:handles.n_datasets - % Then, we plot a bold average across subjects - plot(1:size(handles.TPM{i},2),tmp_TPMCum{i},'Color',handles.PopColor{2}(i,:),... - 'LineWidth',2,'Parent',handles.CumStates); - xlabel(handles.CumStates,'Time index [-]','FontSize',10); - xlim(handles.CumStates,[1,size(handles.TPM{i},2)]); - end -else - for i = 1:handles.n_datasets - for j = 1:size(handles.TPMCum{i},1) - plot(((1:size(handles.TPM{i},2))-1)*handles.TR,... - handles.TPMCum{i}(j,:),... - 'Color',handles.PopColor{1}(i,:),'Parent',handles.CumStates); - hold(handles.CumStates,'on'); - end - end - - for i = 1:handles.n_datasets - plot(((1:size(handles.TPM{i},2))-1)*handles.TR,... - tmp_TPMCum{i},... - 'LineWidth',2,'Color',handles.PopColor{2}(i,:),'Parent',handles.CumStates); - xlabel(handles.CumStates,'Time [s]','FontSize',10); - xlim(handles.CumStates,[0,(size(handles.TPM{i},2)-1)*handles.TR]); - end -end - - -ylabel(handles.CumStates,'Cumul. sum [-]','FontSize',10); -set(handles.CumStates,'Box','off'); - -% Makes the state menu visible -set(handles.StateMenu,'Visible','on'); - -% 6. Violin plots -% Below, we plot violin plots depicting: -% - Raw counts of state excursions -% - Fractional counts of state excursions -% - Number of times entering a state -% - Duration of state excursions - -% We build the legend used to plot the violins -% leg_viol = cell(handles.K); -for i = 1:handles.K - leg_viol{i} = num2str(i); -end - -% Makes graphs ready -handles = ResetGraphDisplay(handles.ViolinCounts,handles); -set(handles.ViolinCounts,'Visible','on'); - -handles = ResetGraphDisplay(handles.ViolinCountsFrac,handles); -set(handles.ViolinCountsFrac,'Visible','on'); - -handles = ResetGraphDisplay(handles.ViolinNumber,handles); -set(handles.ViolinNumber,'Visible','on'); - -handles = ResetGraphDisplay(handles.ViolinDuration,handles); -set(handles.ViolinDuration,'Visible','on'); - -% Concatenates the values from the different populations -tmp_toplot = ConcatMat(handles.Resilience,handles.n_datasets,handles.K,handles.n_subjects,'Resilience'); - -% Plots the raw count values -[~,~,handles.ViolinCounts] = MakeViolin(tmp_toplot,... - handles.ViolinCounts,leg_viol,'Resilience [-]',handles.PopColor,handles.n_datasets,handles.K); - -clear tmp_toplot - -tmp_toplot = ConcatMat(handles.kin,handles.n_datasets,handles.K,handles.n_subjects,'kin'); - -% Plots the normalized count values -[~,~,handles.ViolinCountsFrac] = MakeViolin(tmp_toplot,... - handles.ViolinCountsFrac,leg_viol,'kin [-]',handles.PopColor,handles.n_datasets,handles.K); - -clear tmp_toplot - -tmp_toplot = ConcatMat(handles.kout,handles.n_datasets,handles.K,handles.n_subjects,'kout'); - -% Plots the number of times a state is entered -[~,~,handles.ViolinNumber] = MakeViolin(tmp_toplot,... 
- handles.ViolinNumber,leg_viol,'kout [-]',handles.PopColor,handles.n_datasets,handles.K); - -clear tmp_toplot - - -tmp_toplot = ConcatMat(handles.Betweenness,handles.n_datasets,handles.K,handles.n_subjects,'Betweenness'); - -try - - [~,~,handles.ViolinDuration] = MakeViolin(tmp_toplot,... - handles.ViolinDuration,leg_viol,'Betweenness [-]',handles.PopColor,handles.n_datasets,handles.K); - -catch - -end - -clear tmp_toplot - - -% Makes the displays visible -set(handles.DS_Scrubbed,'Visible','on'); -set(handles.DS_Scrubbed,'ForegroundColor',custom_cm(1,:)); - -set(handles.DS_NotSelected,'Visible','on'); -set(handles.DS_NotSelected,'ForegroundColor',[0.9,0.9,0.9]); - -set(handles.DS_Unassigned,'Visible','on'); -set(handles.DS_Unassigned,'ForegroundColor',custom_cm(handles.K+3,:)); - -tmp = {handles.DS_CAP1,handles.DS_CAP2,handles.DS_CAP3,handles.DS_CAP4,... - handles.DS_CAP5,handles.DS_CAP6,handles.DS_CAP7,handles.DS_CAP8,... - handles.DS_CAP9,handles.DS_CAP10,handles.DS_CAP11,handles.DS_CAP12}; - -for i = 1:handles.K - set(tmp{i},'Visible','on'); - set(tmp{i},'ForegroundColor',custom_cm(2+i,:)); -end - -clear tmp - -tmp = {handles.V_POP1,handles.V_POP2,handles.V_POP3,handles.V_POP4}; - -for i = 1:handles.n_datasets - set(tmp{i},'Visible','on'); -end - -clear tmp - -guidata(hObject,handles); - - - - - -%% Subject popup menu control (metrics computation) -% When a new subject is chosen, the display of the transition matrix graph -% is changed - -function SubjectMenuMetrics_Callback(hObject, eventdata, handles) - -% In the case when we have something to plot... -try - % ... we reset the graph display, make the graph visible, and plot - % again - handles = ResetGraphDisplay(handles.TM_Subject,handles); - set(handles.TM_Subject,'Visible','on'); - - tmp_toplot = squeeze(handles.TM{handles.ReferencePopulation}(:,:,get(handles.SubjectMenuMetrics,'Value'))); - tmp_toplot = tmp_toplot(3:end,3:end); - - imagesc(tmp_toplot,'Parent',handles.TM_Subject); - caxis(handles.TM_Subject,[0 0.03]); - axis(handles.TM_Subject,'square','on'); - axis(handles.TM_Subject,'off'); - - tmp_cb = cbrewer('seq','Greys',1000); - colormap(handles.TM_Subject,flipud(tmp_cb)); - - clear tmp_toplot - - handles.Log = CAP_AddToLog(handles.Log,'Subject index changed (metrics)',... - {get(hObject,'Value')},... - {'Subject index'}); - -catch - errordlg('Please recompute metrics for the presently considered population !'); -end - -guidata(hObject,handles); - - - -function SubjectMenuMetrics_CreateFcn(hObject, eventdata, handles) - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','white'); -end - - - - - -%% State menu popup control -% When we change our state of interest, we will change the display of the -% cumulative state being displayed - -function StateMenu_Callback(hObject, eventdata, handles) - -handles = ResetGraphDisplay(handles.CumStates,handles); -set(handles.CumStates,'Visible','on'); - -% In the case of a non-null matrix... -if ~isempty(handles.TPM) - - for i = 1:handles.n_datasets - % Cumulative distribution for the state that we want to be displayed (i.e. 
- % the state from the popup menu) - handles.TPMCum{i} = cumsum(handles.TPM{i} == get(handles.StateMenu,'Value'),2); - - % Average of the considered state across subjects - tmp_TPMCum{i} = mean(handles.TPMCum{i},1); - end - - % Similarly as above, we plot time if we have a valid TR; else, we plot - % 'time index' - if handles.isTROK == false - - for i = 1:handles.n_datasets - % We first plot each subject curve - for j = 1:size(handles.TPMCum{i},1) - plot(handles.TPMCum{i}(j,:),'Color',handles.PopColor{1}(i,:),... - 'Parent',handles.CumStates); - hold(handles.CumStates,'on'); - end - end - - for i = 1:handles.n_datasets - % Then, we plot a bold average across subjects - plot(tmp_TPMCum{i},'Color',handles.PopColor{2}(i,:),... - 'LineWidth',2,'Parent',handles.CumStates); - xlabel(handles.CumStates,'Time index [-]','FontSize',8); - end - else - for i = 1:handles.n_datasets - for j = 1:size(handles.TPMCum{i},1) - plot(((1:size(handles.TPM{i},2))-1)*handles.TR,... - handles.TPMCum{i}(j,:),... - 'Color',handles.PopColor{1}(i,:),'Parent',handles.CumStates); - hold(handles.CumStates,'on'); - end - end - - for i = 1:handles.n_datasets - plot(((1:size(handles.TPM{i},2))-1)*handles.TR,... - tmp_TPMCum{i},... - 'LineWidth',2,'Color',handles.PopColor{2}(i,:),'Parent',handles.CumStates); - xlabel(handles.CumStates,'Time [s]','FontSize',8); - end - end - - -ylabel(handles.CumStates,'Cumul. sum [-]','FontSize',8); -set(handles.CumStates,'Box','off'); - -end - -guidata(hObject,handles); - - - -function StateMenu_CreateFcn(hObject, eventdata, handles) - -if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) - set(hObject,'BackgroundColor','white'); -end - - - - - -%% General utilities - -% Resets the display of a graph object -function handles = ResetGraphDisplay(Graph,handles) - -cla(Graph); -set(Graph,'Visible','off'); - - - -% Fills the entries of a pop-up menu with 'Subject _' entries from the -% reference population -function handles = FillSubjectList(ToFill,handles) - -tmp_string = {}; - -for ns = 1:handles.n_subjects{handles.ReferencePopulation} - tmp_string{ns} = ['Subject ',num2str(ns)]; -end - -set(ToFill,'String',tmp_string); - -clear tmp_string - - - -% Fills the entries of a pop-up menu with the different population entries -function handles = FillPopulationList(ToFill,handles) - -tmp_string = {}; - -for ns = 1:handles.n_datasets - tmp_string{ns} = [handles.SubjNames{ns}]; -end - -set(ToFill,'String',tmp_string); - -clear tmp_string - - - -% Fills the entries of a pop-up menu with the different population entries -function handles = FillStateList(ToFill,handles) - -tmp_string = {}; - -for ns = 1:handles.K - tmp_string{ns} = ['State ',num2str(ns)]; -end - -set(ToFill,'String',tmp_string); - -clear tmp_string - - - -% Removes NaN-containing lines from a matrix (used for the plotting of -% duration violin plots) -function M2 = DiscardNaN(M) - -% We initialize the output matrix as a void one -M2 = []; - -% For each row, we count the amount of NaN entries; if not equal to zero, -% then we discard the line -for i = 1:size(M,1) - if sum(isnan(M(i,:))) > 0 - else - M2 = [M2;M(i,:)]; - end -end - - - -% Concatenates populations appropriately for Violin plotting -function M2 = ConcatMat(M,n_pop,n_states,n_subjects,type) - -% Creates the data matrix (nan values are used to have the same amount of -% data for each group) -M2 = nan(n_pop*n_states,max(cell2mat(n_subjects))); - -for i = 1:n_pop - - switch type - case 'Raw counts' - - tmp = M{i}.raw.state(:,1:n_states)'; - 
- for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'Normalized counts' - - tmp = M{i}.frac.state(:,1:n_states)'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'Number' - - tmp = M{i}(:,3:3+n_states-1)'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'Duration' - tmp = DiscardNaN(M{i}(:,3:3+n_states-1))'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'Betweenness' - - tmp = M{i}'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'kin' - - tmp = M{i}'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'kout' - - tmp = M{i}'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'Resilience' - - tmp = M{i}'; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - - clear tmp - - case 'FD' - tmp = M{i}; - - for j = 1:n_states - M2(i+(j-1)*n_pop,1:size(tmp,2)) = tmp(j,:); - end - end -end diff --git a/DefaultData/2019_03_03_BCT/adjacency_plot_und.m b/DefaultData/2019_03_03_BCT/adjacency_plot_und.m deleted file mode 100755 index 64ddbd2..0000000 --- a/DefaultData/2019_03_03_BCT/adjacency_plot_und.m +++ /dev/null @@ -1,53 +0,0 @@ -function [X,Y,Z] = adjacency_plot_und(aij,coor) -%ADJACENCY_PLOT_UND Quick visualization tool -% -% [X,Y,Z] = ADJACENCY_PLOT(AIJ,COOR) takes adjacency matrix AIJ and node -% spatial coordinates COOR and generates three vectors that can be used -% for quickly plotting the edges in AIJ. If no coordinates are specified, -% then each node is assigned a position on a circle. COOR can, in -% general, be 2D or 3D. -% -% Example: -% -% >> load AIJ; % load your adjacency matrix -% >> load COOR; % load 3D coordinates for each node -% >> [x,y,z] = adjacency_plot_und(AIJ,COOR); % call function -% >> plot3(x,y,z); % plots network as a single line object -% -% If COOR were 2D, the PLOT3 command changes to a PLOT command. -% -% NOTE: This function is similar to MATLAB's GPLOT command. -% -% Richard Betzel, Indiana University, 2013 - -n = length(aij); -if nargin < 2 - coor = zeros(n,2); - for i = 1:n - coor(i,:) = [cos(2*pi*(i - 1)./n), sin(2*pi*(i - 1)./n)]; - end -end - -[i,j] = find(triu(aij,1)); -[~, p] = sort(max(i,j)); -i = i(p); -j = j(p); - -X = [ coor(i,1) coor(j,1)]'; -Y = [ coor(i,2) coor(j,2)]'; -if size(coor,2) == 3 - Z = [ coor(i,3) coor(j,3)]'; -end -if isfloat(coor) || nargout ~= 0 - X = [X; NaN(size(i))']; - Y = [Y; NaN(size(i))']; - if size(coor,2) == 3 - Z = [Z; NaN(size(i))']; - end -end - -X = X(:); -Y = Y(:); -if size(coor,2) == 3 - Z = Z(:); -end diff --git a/DefaultData/2019_03_03_BCT/agreement.m b/DefaultData/2019_03_03_BCT/agreement.m deleted file mode 100755 index 50c75ce..0000000 --- a/DefaultData/2019_03_03_BCT/agreement.m +++ /dev/null @@ -1,60 +0,0 @@ -function D = agreement(ci,buffsz) -%AGREEMENT Agreement matrix from clusters -% -% D = AGREEMENT(CI) takes as input a set of vertex partitions CI of -% dimensions [vertex x partition]. Each column in CI contains the -% assignments of each vertex to a class/community/module. This function -% aggregates the partitions in CI into a square [vertex x vertex] -% agreement matrix D, whose elements indicate the number of times any two -% vertices were assigned to the same class. 
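As an aside, here is a minimal editorial sketch of the co-assignment count described above (not part of the original file; it assumes the same Statistics Toolbox dummyvar construction that agreement.m itself uses below):

% Toy input: three vertices, two partitions; vertices 1 and 2 always share a class.
ci  = [1 1; 1 1; 2 2];       % [vertex x partition] class assignments
ind = dummyvar(ci);          % indicator (dummy) columns, one block per partition
D   = ind*ind';              % D(i,j) = number of partitions in which i and j co-occur
D   = D.*~eye(length(D));    % zero the diagonal, as agreement.m does at the end
% Here D(1,2) is 2 and D(1,3) is 0.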
-% -% In the case that the number of nodes and partitions in CI is large -% (greater than ~1000 nodes or greater than ~1000 partitions), the script -% can be made faster by computing D in pieces. The optional input BUFFSZ -% determines the size of each piece. Trial and error has found that -% BUFFSZ ~ 150 works well. -% -% Inputs, CI, set of (possibly) degenerate partitions -% BUFFSZ, optional second argument to set buffer size -% -% Outputs: D, agreement matrix -% -% Richard Betzel, Indiana University, 2012 - -%modification history -%09.24.2012 - added loop for big N that makes the function slower but also -% prevents it from maxing out memory. - -n = size(ci,2); - -if nargin < 2 - buffsz = 1000; -end - -if n <= buffsz - - ind = dummyvar(ci); - D = ind*ind'; - -else - - a = 1:buffsz:n; - b = buffsz:buffsz:n; - - if length(a) ~= length(b) - b = [b, n]; - end - - x = [a' b']; - nbuff = size(x,1); - - D = zeros(size(ci,1)); - for i = 1:nbuff - y = ci(:,x(i,1):x(i,2)); - ind = dummyvar(y); - D = D + ind*ind'; - end - -end - -D = D.*~eye(length(D)); diff --git a/DefaultData/2019_03_03_BCT/agreement_weighted.m b/DefaultData/2019_03_03_BCT/agreement_weighted.m deleted file mode 100755 index d917f43..0000000 --- a/DefaultData/2019_03_03_BCT/agreement_weighted.m +++ /dev/null @@ -1,28 +0,0 @@ -function D = agreement_weighted(CI,Wts) -% AGREEMENT_WEIGHTED Weights agreement matrix -% -% D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the -% exception that each partitions contribution is weighted according to -% the corresponding scalar value stored in the vector WTS. As an example, -% suppose CI contained partitions obtained using some heuristic for -% maximizing modularity. A possible choice for WTS might be the Q metric -% (Newman's modularity score). Such a choice would add more weight to -% higher modularity partitions. -% -% NOTE: Unlike AGREEMENT, this script does not have the input argument -% BUFFSZ. -% -% Inputs: CI, set of partitions -% WTS, relative weight of importance of each paritition -% -% Outputs: D, weighted agreement matrix -% -% Richard Betzel, Indiana University, 2013 - -Wts = Wts./sum(Wts); -[N,M] = size(CI); -D = zeros(N); -for i = 1:M - d = dummyvar(CI(:,i)); - D = D + (d*d')*Wts(i); -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/align_matrices.m b/DefaultData/2019_03_03_BCT/align_matrices.m deleted file mode 100755 index db4742e..0000000 --- a/DefaultData/2019_03_03_BCT/align_matrices.m +++ /dev/null @@ -1,135 +0,0 @@ -function [Mreordered,Mindices,cost] = align_matrices(M1,M2,dfun,flag) -%ALIGN_MATRICES Alignment of two matrices -% -% [Mreordered,Mindices,cost] = align_matrices(M1,M2,dfun,flag) -% -% This function aligns two matrices relative to one another by reordering -% the nodes in M2. The function uses a version of simulated annealing. -% -% Inputs: M1 = first connection matrix (square) -% M2 = second connection matrix (square) -% dfun = distance metric to use for matching: -% 'absdff' = absolute difference -% 'sqrdff' = squared difference -% 'cosang' = cosine of vector angle -% -% Mreordered = reordered connection matrix M2 -% Mindices = reordered indices -% cost = distance between M1 and Mreordered -% -% Connection matrices can be weighted or binary, directed or undirected. -% They must have the same number of nodes. M1 can be entered in any -% node ordering. -% -% Note that in general, the outcome will depend on the initial condition -% (the setting of the random number seed). 
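Editorial usage sketch (not part of the original file): because the outcome depends on the random initial condition, fixing MATLAB's random seed makes a run reproducible. M1 and M2 are toy inputs here, and passing 1 for the final 'flag' argument is an assumption, since its role is not documented in this excerpt.

rng(0);                          % fix the random number seed for a reproducible annealing run
M1 = rand(10); M1 = (M1+M1')/2;  % toy symmetric matrix
p  = randperm(10);
M2 = M1(p,p);                    % node-permuted copy of M1
[Mreordered,Mindices,cost] = align_matrices(M1,M2,'absdff',1);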
Also, there is no good way to -% determine optimal annealing parameters in advance - these parameters -% will need to be adjusted "by hand" (particularly H, Texp, T0, and Hbrk). -% For large and/or dense matrices, it is highly recommended to perform -% exploratory runs varying the settings of 'H' and 'Texp' and then select -% the best values. -% -% Based on extensive testing, it appears that T0 and Hbrk can remain -% unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for -% example. H is the most important parameter - set to larger values as -% the problem size increases. Good solutions can be obtained for -% matrices up to about 100 nodes. It is advisable to run this function -% multiple times and select the solution(s) with the lowest 'cost'. -% -% If the two matrices are related it may be very helpful to pre-align them -% by reordering along their largest eigenvectors: -% [v,~] = eig(M1); v1 = abs(v(:,end)); [a1,b1] = sort(v1); -% [v,~] = eig(M2); v2 = abs(v(:,end)); [a2,b2] = sort(v2); -% [a,b,c] = overlapMAT2(M1(b1,b1),M2(b2,b2),'dfun',1); -% -% Setting 'Texp' to zero cancels annealing and uses a greedy algorithm -% instead. -% -% Yusuke Adachi, University of Tokyo, 2010 -% Olaf Sporns, Indiana University, 2010 - -N = size(M1,1); - -% define maxcost (greatest possible difference) -switch dfun -case 'absdff' - maxcost = sum(abs(sort(M1(:))-(sort(M2(:),'descend')))); -case 'sqrdff' - maxcost = sum((sort(M1(:))-(sort(M2(:),'descend'))).^2); -case 'cosang' - maxcost = pi/2; -end; - -% initialize lowcost -switch dfun -case 'absdff' - lowcost = sum(sum(abs(M1-M2)))/maxcost; -case 'sqrdff' - lowcost = sum(sum((M1-M2).^2))/maxcost; -case 'cosang' - lowcost = acos(dot(M1(:),M2(:))./sqrt(dot(M1(:),M1(:))*dot(M2(:),M2(:))))/maxcost; -end; - -% initialize -mincost = lowcost; -anew = 1:N; -amin = 1:N; -h = 0; hcnt = 0; - -% set annealing parameters -% H determines the maximal number of steps -% Texp determines the steepness of the temperature gradient -% T0 sets the initial temperature (and scales the energy term) -% Hbrk sets a break point for the simulation (no further improvement) -H = 1e06; Texp = 1-1/H; T0 = 1e-03; Hbrk = H/10; -%Texp = 0; - -while h<H - h = h+1; hcnt = hcnt+1; - % terminate if no new lowest cost has been found for Hbrk steps - if (hcnt>Hbrk) - break; - end; - % current temperature - T = T0*Texp^h; - % choose two positions at random and flip them - atmp = anew; - %r = randperm(N); % slower - r = ceil(rand(1,2).*N); - atmp(r(1)) = anew(r(2)); - atmp(r(2)) = anew(r(1)); - switch dfun - case 'absdff' - costnew = sum(sum(abs(M1-M2(atmp,atmp))))/maxcost; - case 'sqrdff' - costnew = sum(sum((M1-M2(atmp,atmp)).^2))/maxcost; - case 'cosang' - M2atmp = M2(atmp,atmp); - costnew = acos(dot(M1(:),M2atmp(:))./sqrt(dot(M1(:),M1(:))*dot(M2atmp(:),M2atmp(:))))/maxcost; - end; - % annealing step - if (costnew < lowcost) || (rand < exp(-(costnew-lowcost)/T)) - anew = atmp; - lowcost = costnew; - % is this the absolute best? 
- if (lowcost -im = [i(1) i(2)]; -jm = [j(1) j(2)]; - -% copy into tree graph -CIJtree(im,jm) = CIJ(im,jm); -in = im; -out = setdiff(1:N,in); - -% repeat N-2 times -for n=1:N-2 - - % find strongest link between 'in' and 'out',ignore tied ranks - [i,j,s] = find(max(max(CIJ(in,out)))==CIJ(in,out)); - im = in(i(1)); - jm = out(j(1)); - - % copy into tree graph - CIJtree(im,jm) = CIJ(im,jm); CIJtree(jm,im) = CIJ(jm,im); - in = [in jm]; %#ok - out = setdiff(1:N,in); - -end; - -% now add connections back, with the total number of added connections -% determined by the desired 'avgdeg' -CIJnotintree = CIJ.*~CIJtree; -[a,b] = sort(nonzeros(CIJnotintree),'descend'); -cutoff = avgdeg*N - 2*(N-1); -thr = a(cutoff); -CIJclus = CIJtree + CIJnotintree.*(CIJnotintree>=thr); - diff --git a/DefaultData/2019_03_03_BCT/betweenness_bin.m b/DefaultData/2019_03_03_BCT/betweenness_bin.m deleted file mode 100755 index 2dcd104..0000000 --- a/DefaultData/2019_03_03_BCT/betweenness_bin.m +++ /dev/null @@ -1,53 +0,0 @@ -function BC=betweenness_bin(G) -%BETWEENNESS_BIN Node betweenness centrality -% -% BC = betweenness_bin(A); -% -% Node betweenness centrality is the fraction of all shortest paths in -% the network that contain a given node. Nodes with high values of -% betweenness centrality participate in a large number of shortest paths. -% -% Input: A, binary (directed/undirected) connection matrix. -% -% Output: BC, node betweenness centrality vector. -% -% Note: Betweenness centrality may be normalised to the range [0,1] as -% BC/[(N-1)(N-2)], where N is the number of nodes in the network. -% -% Reference: Kintali (2008) arXiv:0809.1906v2 [cs.DS] -% (generalization to directed and disconnected graphs) -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2012 - - -n=length(G); %number of nodes -I=eye(n)~=0; %logical identity matrix -d=1; %path length -NPd=G; %number of paths of length |d| -NSPd=NPd; %number of shortest paths of length |d| -NSP=NSPd; NSP(I)=1; %number of shortest paths of any length -L=NSPd; L(I)=1; %length of shortest paths - -%calculate NSP and L -while find(NSPd,1) - d=d+1; - NPd=NPd*G; - NSPd=NPd.*(L==0); - NSP=NSP+NSPd; - L=L+d.*(NSPd~=0); -end -L(~L)=inf; L(I)=0; %L for disconnected vertices is inf -NSP(~NSP)=1; %NSP for disconnected vertices is 1 - -Gt=G.'; -DP=zeros(n); %vertex on vertex dependency -diam=d-1; %graph diameter - -%calculate DP -for d=diam:-1:2 - DPd1=(((L==d).*(1+DP)./NSP)*Gt).*((L==(d-1)).*NSP); - DP=DP + DPd1; %DPd1: dependencies on vertices |d-1| from source -end - -BC=sum(DP,1); %compute betweenness \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/betweenness_wei.m b/DefaultData/2019_03_03_BCT/betweenness_wei.m deleted file mode 100755 index 6ea3b01..0000000 --- a/DefaultData/2019_03_03_BCT/betweenness_wei.m +++ /dev/null @@ -1,76 +0,0 @@ -function BC=betweenness_wei(G) -%BETWEENNESS_WEI Node betweenness centrality -% -% BC = betweenness_wei(L); -% -% Node betweenness centrality is the fraction of all shortest paths in -% the network that contain a given node. Nodes with high values of -% betweenness centrality participate in a large number of shortest paths. -% -% Input: L, Directed/undirected connection-length matrix. -% -% Output: BC, node betweenness centrality vector. -% -% Notes: -% The input matrix must be a connection-length matrix, typically -% obtained via a mapping from weight to length. 
For instance, in a -% weighted correlation network higher correlations are more naturally -% interpreted as shorter distances and the input matrix should -% consequently be some inverse of the connectivity matrix. -% Betweenness centrality may be normalised to the range [0,1] as -% BC/[(N-1)(N-2)], where N is the number of nodes in the network. -% -% Reference: Brandes (2001) J Math Sociol 25:163-177. -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2012 - -n=length(G); -% E=find(G); G(E)=1./G(E); %invert weights -BC=zeros(n,1); %vertex betweenness - -for u=1:n - D=inf(1,n); D(u)=0; %distance from u - NP=zeros(1,n); NP(u)=1; %number of paths from u - S=true(1,n); %distance permanence (true is temporary) - P=false(n); %predecessors - Q=zeros(1,n); q=n; %order of non-increasing distance - - G1=G; - V=u; - while 1 - S(V)=0; %distance u->V is now permanent - G1(:,V)=0; %no in-edges as already shortest - for v=V - Q(q)=v; q=q-1; - W=find(G1(v,:)); %neighbours of v - for w=W - Duw=D(v)+G1(v,w); %path length to be tested - if Duw<D(w) %if new u->w shorter than old - D(w)=Duw; - NP(w)=NP(v); %NP(u->w) = NP of new path - P(w,:)=0; - P(w,v)=1; %v is the only predecessor - elseif Duw==D(w) %if new u->w equal to old - NP(w)=NP(w)+NP(v); %NP(u->w) sum of old and new - P(w,v)=1; %v is also a predecessor - end - end - end - - minD=min(D(S)); - if isempty(minD), break %all nodes reached, or - elseif isinf(minD) %...some cannot be reached: - Q(1:q)=find(isinf(D)); break %...these are first-in-line - end - V=find(D==minD); - end - - DP=zeros(n,1); %dependency - for w=Q(1:n-1) - BC(w)=BC(w)+DP(w); - for v=find(P(w,:)) - DP(v)=DP(v)+(1+DP(w)).*NP(v)./NP(w); - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/breadth.m b/DefaultData/2019_03_03_BCT/breadth.m deleted file mode 100755 index b1cc339..0000000 --- a/DefaultData/2019_03_03_BCT/breadth.m +++ /dev/null @@ -1,62 +0,0 @@ -function [distance,branch] = breadth(CIJ,source) -%BREADTH Auxiliary function for breadthdist.m -% -% [distance,branch] = breadth(CIJ,source); -% -% Implementation of breadth-first search. -% -% Input: CIJ, binary (directed/undirected) connection matrix -% source, source vertex -% -% Outputs: distance, distance between 'source' and i'th vertex -% (0 for source vertex) -% branch, vertex that precedes i in the breadth-first search tree -% (-1 for source vertex) -% -% Notes: Breadth-first search tree does not contain all paths (or all -% shortest paths), but allows the determination of at least one path with -% minimum distance. The entire graph is explored, starting from source -% vertex 'source'. 
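Going back to betweenness_wei quoted above, a usage sketch under the assumption that W is a correlation-like weight matrix; the inverse mapping from weights to lengths is one common choice rather than the only one, and W itself is hypothetical:
W  = rand(20); W = triu(W,1); W = W + W';      % hypothetical weighted undirected network
L  = zeros(size(W));
L(W > 0) = 1 ./ W(W > 0);                      % map weights to lengths (stronger = shorter)
BC = betweenness_wei(L);                       % nodal betweenness on the length matrix
BCnorm = BC ./ ((length(W)-1)*(length(W)-2));  % optional normalisation to [0,1]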
-% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - -N = size(CIJ,1); - -% colors: white, gray, black -white = 0; -gray = 1; -black = 2; - -% initialize colors -color = zeros(1,N); -% initialize distances -distance = inf*ones(1,N); -% initialize branches -branch = zeros(1,N); - -% start on vertex 'source' -color(source) = gray; -distance(source) = 0; -branch(source) = -1; -Q = source; - -% keep going until the entire graph is explored -while ~isempty(Q) - u = Q(1); - ns = find(CIJ(u,:)); - for v=ns -% this allows the 'source' distance to itself to be recorded - if (distance(v)==0) - distance(v) = distance(u)+1; - end; - if (color(v)==white) - color(v) = gray; - distance(v) = distance(u)+1; - branch(v) = u; - Q = [Q v]; %#ok - end; - end; - Q = Q(2:length(Q)); - color(u) = black; -end diff --git a/DefaultData/2019_03_03_BCT/breadthdist.m b/DefaultData/2019_03_03_BCT/breadthdist.m deleted file mode 100755 index 3d189f9..0000000 --- a/DefaultData/2019_03_03_BCT/breadthdist.m +++ /dev/null @@ -1,39 +0,0 @@ -function [R,D] = breadthdist(CIJ) -%BREADTHDIST Reachability and distance matrices -% -% [R,D] = breadthdist(CIJ); -% -% The binary reachability matrix describes reachability between all pairs -% of nodes. An entry (u,v)=1 means that there exists a path from node u -% to node v; alternatively (u,v)=0. -% -% The distance matrix contains lengths of shortest paths between all -% pairs of nodes. An entry (u,v) represents the length of shortest path -% from node u to node v. The average shortest path length is the -% characteristic path length of the network. -% -% Input: CIJ, binary (directed/undirected) connection matrix -% -% Outputs: R, reachability matrix -% D, distance matrix -% -% Note: slower but less memory intensive than "reachdist.m". -% -% Algorithm: Breadth-first search. -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - -N = size(CIJ,1); - -D = zeros(N); -for i=1:N - D(i,:) = breadth(CIJ,i); -end; - -% replace zeros with 'Inf's -D(D==0) = Inf; - -% construct R -R = double(D~=Inf); - diff --git a/DefaultData/2019_03_03_BCT/charpath.m b/DefaultData/2019_03_03_BCT/charpath.m deleted file mode 100755 index 0866997..0000000 --- a/DefaultData/2019_03_03_BCT/charpath.m +++ /dev/null @@ -1,79 +0,0 @@ -function [lambda,efficiency,ecc,radius,diameter] = charpath(D,diagonal_dist,infinite_dist) -%CHARPATH Characteristic path length, global efficiency and related statistics -% -% lambda = charpath(D); -% lambda = charpath(D); -% [lambda,efficiency] = charpath(D); -% [lambda,efficiency,ecc,radius,diameter] = charpath(D,diagonal_dist,infinite_dist); -% -% The network characteristic path length is the average shortest path -% length between all pairs of nodes in the network. The global efficiency -% is the average inverse shortest path length in the network. The nodal -% eccentricity is the maximal path length between a node and any other -% node in the network. The radius is the minimal eccentricity, and the -% diameter is the maximal eccentricity. -% -% Input: D, distance matrix -% diagonal_dist optional argument -% include distances on the main diagonal -% (default: diagonal_dist=0) -% infinite_dist optional argument -% include infinite distances in calculation -% (default: infinite_dist=1) -% -% Outputs: lambda, network characteristic path length -% efficiency, network global efficiency -% ecc, nodal eccentricity -% radius, network radius -% diameter, network diameter -% -% Notes: -% The input distance matrix may be obtained with any of the distance -% functions, e.g. 
distance_bin, distance_wei. -% Characteristic path length is defined here as the mean shortest -% path length between all pairs of nodes, for consistency with common -% usage. Note that characteristic path length is also defined as the -% median of the mean shortest path length from each node to all other -% nodes. -% Infinitely long paths (i.e. paths between disconnected nodes) are -% included in computations by default. This behavior may be modified with -% via the infinite_dist argument. -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 -% Mika Rubinov, U Cambridge, 2010/2015 - -% Modification history -% 2002: original (OS) -% 2010: incorporation of global efficiency (MR) -% 2015: exclusion of diagonal weights by default (MR) -% 2016: inclusion of infinite distances by default (MR) - -n = size(D,1); -if any(any(isnan(D))) - error('The distance matrix must not contain NaN values'); -end -if ~exist('diagonal_dist','var') || ~diagonal_dist || isempty(diagonal_dist) - D(1:n+1:end) = NaN; % set diagonal distance to NaN -end -if exist('infinite_dist','var') && ~infinite_dist - D(isinf(D)) = NaN; % ignore infinite path lengths -end - -Dv = D(~isnan(D)); % get non-NaN indices of D - -% Mean of entries of D(G) -lambda = mean(Dv); - -% Efficiency: mean of inverse entries of D(G) -efficiency = mean(1./Dv); - -% Eccentricity for each vertex -ecc = nanmax(D,[],2); - -% Radius of graph -radius = min(ecc); - -% Diameter of graph -diameter = max(ecc); - diff --git a/DefaultData/2019_03_03_BCT/clique_communities.m b/DefaultData/2019_03_03_BCT/clique_communities.m deleted file mode 100755 index 76aa90e..0000000 --- a/DefaultData/2019_03_03_BCT/clique_communities.m +++ /dev/null @@ -1,102 +0,0 @@ -function M = clique_communities(A, cq_thr) -% CLIQUE_COMMUNITIES Overlapping community structure via clique percolation -% -% M = clique_communities(A, cq_thr) -% -% The optimal community structure is a subdivision of the network into -% groups of nodes which have a high number of within-group connections -% and a low number of between group connections. -% -% This algorithm uncovers overlapping community structure in binary -% undirected networks via the clique percolation method. -% -% Inputs: -% A, Binary undirected connection matrix. -% -% cq_thr, Clique size threshold (integer). Larger clique size -% thresholds potentially result in larger communities. -% -% Output: -% M, Overlapping community-affiliation matrix -% Binary matrix of size CxN [communities x nodes] -% -% Algorithms: -% Bron–Kerbosch algorithm for detection of maximal cliques. -% Dulmage-Mendelsohn decomposition for detection of components -% (implemented in get_components.m) -% -% -% Note: This algorithm can be slow and memory intensive in large -% matrices. The algorithm requires the function get_components.m -% -% Reference: Palla et al. (2005) Nature 435, 814-818. 
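Looping back to charpath quoted above, a small sketch of the usual pipeline on a hypothetical binary graph, with the distance matrix built by distance_bin first; none of these inputs come from the original files:
A = double(rand(30) > 0.8); A = max(A, A'); A(1:31:end) = 0;  % hypothetical binary undirected graph, no self-connections
D = distance_bin(A);                                          % shortest path length matrix
[lambda, efficiency, ecc, radius, diameter] = charpath(D);    % defaults: diagonal excluded, infinite distances kept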
-% -% Mika Rubinov, Janelia HHMI, 2017 - -if ~isequal(A, A.') - error('A must be undirected.') -end -if ~isequal(size(A, 1), size(A, 2)) - error('A must be square.') -end -if ~issparse(A) - A = sparse(A); -end -if ~islogical(A) - A = logical(A); -end - -n = length(A); % number of nodes -A(1:n+1:end) = 0; % clear diagonal -MQ = maximal_cliques(A, n); % get maximal cliques -Cq = double(cell2mat(MQ)).'; % convert to matrix -Cq = Cq(sum(Cq, 2) >= cq_thr, :); % remove subthreshold cliques -Ov = Cq * Cq.'; % compute clique overlap -Ov_thr = (Ov >= cq_thr - 1); % keep percolating cliques - -Cq_components = get_components(Ov_thr); % find components - -m = max(Cq_components); % get number of components -M = zeros(m, n); % collect communities -for i = 1:m - M(i, any( Cq(Cq_components==i, :), 1)) = 1; -end - -end - -function MQ = maximal_cliques(A, n) % Bron-Kerbosch algorithm - -MQ = cell(1, 1000*n); - -R = false(n, 1); %current -P = true(n, 1); %prospective -X = false(n, 1); %processed -q = 0; - -BK(R, P, X); - - function BK(R, P, X) - if ~any(P | X) - q = q + 1; - MQ{q} = R; - else - U_p = find(any([P X], 2)); - [~, idx] = max(A(:,U_p).' * double(P)); - u_p = U_p(idx); - - U = find(all([P ~A(:,u_p)], 2)).'; - for u = U - Nu = A(:,u); - P(u) = 0; - Rnew = R; Rnew(u) = 1; - Pnew = all([P Nu],2); - Xnew = all([X Nu],2); - BK(Rnew, Pnew, Xnew) - X(u) = 1; - end - end - end - -MQ=MQ(1:q); - -end diff --git a/DefaultData/2019_03_03_BCT/clustering_coef_bd.m b/DefaultData/2019_03_03_BCT/clustering_coef_bd.m deleted file mode 100755 index 8506341..0000000 --- a/DefaultData/2019_03_03_BCT/clustering_coef_bd.m +++ /dev/null @@ -1,35 +0,0 @@ -function C=clustering_coef_bd(A) -%CLUSTERING_COEF_BD Clustering coefficient -% -% C = clustering_coef_bd(A); -% -% The clustering coefficient is the fraction of triangles around a node -% (equiv. the fraction of node's neighbors that are neighbors of each other). -% -% Input: A, binary directed connection matrix -% -% Output: C, clustering coefficient vector -% -% Reference: Fagiolo (2007) Phys Rev E 76:026107. -% -% -% Mika Rubinov, UNSW, 2007-2010 - -%Methodological note: In directed graphs, 3 nodes generate up to 8 -%triangles (2*2*2 edges). The number of existing triangles is the main -%diagonal of S^3/2. The number of all (in or out) neighbour pairs is -%K(K-1)/2. Each neighbour pair may generate two triangles. "False pairs" -%are i<->j edge pairs (these do not generate triangles). The number of -%false pairs is the main diagonal of A^2. -%Thus the maximum possible number of triangles = -% = (2 edges)*([ALL PAIRS] - [FALSE PAIRS]) -% = 2 * (K(K-1)/2 - diag(A^2)) -% = K(K-1) - 2(diag(A^2)) - -S=A+A.'; %symmetrized input graph -K=sum(S,2); %total degree (in + out) -cyc3=diag(S^3)/2; %number of 3-cycles (ie. directed triangles) -K(cyc3==0)=inf; %if no 3-cycles exist, make C=0 (via K=inf) -CYC3=K.*(K-1)-2*diag(A^2); %number of all possible 3-cycles -C=cyc3./CYC3; %clustering coefficient - diff --git a/DefaultData/2019_03_03_BCT/clustering_coef_bu.m b/DefaultData/2019_03_03_BCT/clustering_coef_bu.m deleted file mode 100755 index ae492be..0000000 --- a/DefaultData/2019_03_03_BCT/clustering_coef_bu.m +++ /dev/null @@ -1,28 +0,0 @@ -function C=clustering_coef_bu(G) -%CLUSTERING_COEF_BU Clustering coefficient -% -% C = clustering_coef_bu(A); -% -% The clustering coefficient is the fraction of triangles around a node -% (equiv. the fraction of node's neighbors that are neighbors of each other). 
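For the clique-percolation routine quoted above, a brief hypothetical sketch; it assumes a graph dense enough that at least one community of 3-cliques survives the threshold:
A = double(rand(50) > 0.7); A = max(A, A'); A(1:51:end) = 0;  % hypothetical binary undirected graph
M = clique_communities(A, 3);                                 % communities built from cliques of size >= 3
members_of_first = find(M(1, :));                             % nodes belonging to the first overlapping community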
-% -% Input: A, binary undirected connection matrix -% -% Output: C, clustering coefficient vector -% -% Reference: Watts and Strogatz (1998) Nature 393:440-442. -% -% -% Mika Rubinov, UNSW, 2007-2010 - -n=length(G); -C=zeros(n,1); - -for u=1:n - V=find(G(u,:)); - k=length(V); - if k>=2 %degree must be at least 2 - S=G(V,V); - C(u)=sum(S(:))/(k^2-k); - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/clustering_coef_wd.m b/DefaultData/2019_03_03_BCT/clustering_coef_wd.m deleted file mode 100755 index 059c29e..0000000 --- a/DefaultData/2019_03_03_BCT/clustering_coef_wd.m +++ /dev/null @@ -1,42 +0,0 @@ -function C=clustering_coef_wd(W) -%CLUSTERING_COEF_WD Clustering coefficient -% -% C = clustering_coef_wd(W); -% -% The weighted clustering coefficient is the average "intensity" -% (geometric mean) of all triangles associated with each node. -% -% Input: W, weighted directed connection matrix -% (all weights must be between 0 and 1) -% -% Output: C, clustering coefficient vector -% -% Reference: Fagiolo (2007) Phys Rev E 76:026107. -% -% Note: All weights must be between 0 and 1. -% This may be achieved using the weight_conversion.m function, -% W_nrm = weight_conversion(W, 'normalize'); -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification history: -% 2007: original -% 2015: expanded documentation - - -% Methodological note (also see clustering_coef_bd) -% The weighted modification is as follows: -% - The numerator: adjacency matrix is replaced with weights matrix ^ 1/3 -% - The denominator: no changes from the binary version -% -% The above reduces to symmetric and/or binary versions of the clustering -% coefficient for respective graphs. - -A=W~=0; %adjacency matrix -S=W.^(1/3)+(W.').^(1/3); %symmetrized weights matrix ^1/3 -K=sum(A+A.',2); %total degree (in + out) -cyc3=diag(S^3)/2; %number of 3-cycles (ie. directed triangles) -K(cyc3==0)=inf; %if no 3-cycles exist, make C=0 (via K=inf) -CYC3=K.*(K-1)-2*diag(A^2); %number of all possible 3-cycles -C=cyc3./CYC3; %clustering coefficient - diff --git a/DefaultData/2019_03_03_BCT/clustering_coef_wu.m b/DefaultData/2019_03_03_BCT/clustering_coef_wu.m deleted file mode 100755 index bd18514..0000000 --- a/DefaultData/2019_03_03_BCT/clustering_coef_wu.m +++ /dev/null @@ -1,30 +0,0 @@ -function C=clustering_coef_wu(W) -%CLUSTERING_COEF_WU Clustering coefficient -% -% C = clustering_coef_wu(W); -% -% The weighted clustering coefficient is the average "intensity" -% (geometric mean) of all triangles associated with each node. -% -% Input: W, weighted undirected connection matrix -% (all weights must be between 0 and 1) -% -% Output: C, clustering coefficient vector -% -% Note: All weights must be between 0 and 1. -% This may be achieved using the weight_conversion.m function, -% W_nrm = weight_conversion(W, 'normalize'); -% -% Reference: Onnela et al. 
(2005) Phys Rev E 71:065103 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification history: -% 2007: original -% 2015: expanded documentation - -K=sum(W~=0,2); -cyc3=diag((W.^(1/3))^3); -K(cyc3==0)=inf; %if no 3-cycles exist, make C=0 (via K=inf) -C=cyc3./(K.*(K-1)); %clustering coefficient diff --git a/DefaultData/2019_03_03_BCT/clustering_coef_wu_sign.m b/DefaultData/2019_03_03_BCT/clustering_coef_wu_sign.m deleted file mode 100755 index 00bc691..0000000 --- a/DefaultData/2019_03_03_BCT/clustering_coef_wu_sign.m +++ /dev/null @@ -1,136 +0,0 @@ -function [C_pos,C_neg,Ctot_pos,Ctot_neg] = clustering_coef_wu_sign(W,coef_type) -%CLUSTERING_COEF_WU_SIGN Multiple generalizations of the clustering coefficient -% -% [C_pos,C_neg,Ctot_pos,Ctot_neg] = clustering_coef_wu_sign(W,coef_type); -% -% The weighted clustering coefficient is the average weight or intensity -% of all triangles associated with each node. -% -% Inputs: -% W, -% Weighted undirected connection matrix -% -% corr_type, -% Desired type of clustering coefficient. -% Options: -% 1, (default) Onnela et al. formula, used in original -% clustering_coef_wu.m. Computed separately for positive & -% negative weights. -% 2, Zhang & Horvath formula, similar to Onnela formula except -% denominator of Onnela formula relies on binarizing the -% network whereas this denominator is based on weight value, -% which reduces the sensitivity of this measure to the -% weights directly connected to the node of interest. -% Computed separately for positive & negative weights. -% 3, Constantini & Perugini's generalization of the Zhang & -% Horvath formula. This formula takes both positive & -% negative weights into account simultaneously, & is -% particularly sensitive to non-redundancy in path -% information based on sign (i.e., when two weights are -% positive & one negative, or all three are negative, both of -% which indicate that the weight of the third path is not -% redundant information). Produces only one value. -% -% -% Outputs: -% C_pos/C_neg, -% Clustering coefficient vector for positive/negative weights. -% For the third option, only one vector is outputted (as C_pos). -% Ctot_pos/Ctot_neg, -% Mean clustering coefficient for positive and negative weights. -% -% References: -% Onnela et al. 
(2005) Phys Rev E 71:065103 -% Zhang & Horvath (2005) Stat Appl Genet Mol Biol 41:1544-6115 -% Costantini & Perugini (2014) PLOS ONE 9:e88669 -% -% -% Contributor: Jeff Spielberg, Boston University, 2014-2015 -% (script based on clustering_coef_wu.m) - -% -% Modification History: -% May 2014: Added computation of pos & neg weights separately & -% computation of mean coefficient (Jeff Spielberg) -% May 2015: Added computation of Zhang & Horvath and Constantini & -% Perugini formulas (Jeff Spielberg) -% May 2016: Bugfix in computation of the denominator of the Costantini & -% Perugini (flag 3) version (Chiara Pintossi) - -if ~exist('coef_type','var') - coef_type = 1; -end - -n = length(W); %number of nodes -W(1:n+1:end) = 0; - -switch coef_type - case 1 - W_pos = W.*(W>0); - K_pos = sum(W_pos~=0,2); - cyc3_pos = diag((W_pos.^(1/3))^3); - K_pos(cyc3_pos == 0) = inf; %if no 3-cycles exist, make C=0 (via K=inf) - C_pos = cyc3_pos./(K_pos.*(K_pos-1)); %clustering coefficient - Ctot_pos = mean(C_pos); - - W_neg = -W.*(W<0); - K_neg = sum(W_neg~=0,2); - cyc3_neg = diag((W_neg.^(1/3))^3); - K_neg(cyc3_neg == 0) = inf; %if no 3-cycles exist, make C=0 (via K=inf) - C_neg = cyc3_neg./(K_neg.*(K_neg-1)); %clustering coefficient - Ctot_neg = mean(C_neg); - case 2 - W_pos = W.*(W>0); - cyc3_pos = zeros(n,1); - cyc2_pos = zeros(n,1); - for i = 1:n - for j = 1:n - for q = 1:n - cyc3_pos(i) = cyc3_pos(i)+(W_pos(j,i)*W_pos(i,q)*W_pos(j,q)); - if j~=q - cyc2_pos(i) = cyc2_pos(i)+(W_pos(j,i)*W_pos(i,q)); - end - end - end - end - cyc2_pos(cyc3_pos == 0) = inf; %if no 3-cycles exist, make C=0 (via K=inf) - C_pos = cyc3_pos./cyc2_pos; %clustering coefficient - Ctot_pos = mean(C_pos); - - W_neg = -W.*(W<0); - cyc3_neg = zeros(n,1); - cyc2_neg = zeros(n,1); - for i = 1:n - for j = 1:n - for q = 1:n - cyc3_neg(i) = cyc3_neg(i)+(W_neg(j,i)*W_neg(i,q)*W_neg(j,q)); - if j~=q - cyc2_neg(i) = cyc2_neg(i)+(W_neg(j,i)*W_neg(i,q)); - end - end - end - end - cyc2_neg(cyc3_neg == 0) = inf; %if no 3-cycles exist, make C=0 (via K=inf) - C_neg = cyc3_neg./cyc2_neg; %clustering coefficient - Ctot_neg = mean(C_neg); - case 3 - cyc3 = zeros(n,1); - cyc2 = zeros(n,1); - - for i = 1:n - for j = 1:n - for q = 1:n - cyc3(i) = cyc3(i)+(W(j,i)*W(i,q)*W(j,q)); - if j~=q - cyc2(i) = cyc2(i)+abs(W(j,i)*W(i,q)); - end - end - end - end - - cyc2(cyc3 == 0) = inf; %if no 3-cycles exist, make C=0 (via K=inf) - C_pos = cyc3./cyc2; %clustering coefficient - Ctot_pos = mean(C_pos); - C_neg = nan(size(C_pos)); - Ctot_neg = nan(size(Ctot_pos)); -end diff --git a/DefaultData/2019_03_03_BCT/community_louvain.m b/DefaultData/2019_03_03_BCT/community_louvain.m deleted file mode 100755 index 6fb367f..0000000 --- a/DefaultData/2019_03_03_BCT/community_louvain.m +++ /dev/null @@ -1,198 +0,0 @@ -function [M,Q]=community_louvain(W,gamma,M0,B) -%COMMUNITY_LOUVAIN Optimal community structure -% -% M = community_louvain(W); -% [M,Q] = community_louvain(W,gamma); -% [M,Q] = community_louvain(W,gamma,M0); -% [M,Q] = community_louvain(W,gamma,M0,'potts'); -% [M,Q] = community_louvain(W,gamma,M0,'negative_asym'); -% [M,Q] = community_louvain(W,[],[],B); -% -% The optimal community structure is a subdivision of the network into -% nonoverlapping groups of nodes which maximizes the number of within- -% group edges, and minimizes the number of between-group edges. -% -% This function is a fast and accurate multi-iterative generalization of -% the Louvain community detection algorithm. 
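Before going on with community_louvain below, a quick sketch of how the weighted clustering routines quoted above are typically called, using weight_conversion (the normalisation their own help text recommends); W here is a hypothetical signed weighted matrix:
W  = randn(40); W = (W + W')/2; W(1:41:end) = 0;     % hypothetical signed weighted undirected matrix
Wn = weight_conversion(W .* (W > 0), 'normalize');   % positive weights scaled into [0,1]
C  = clustering_coef_wu(Wn);                         % Onnela-type weighted clustering
[Cpos, Cneg] = clustering_coef_wu_sign(W, 1);        % signed variant, Onnela formula applied per sign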
This function subsumes and -% improves upon, -% modularity_louvain_und.m, modularity_finetune_und.m, -% modularity_louvain_dir.m, modularity_finetune_dir.m, -% modularity_louvain_und_sign.m -% and additionally allows to optimize other objective functions (includes -% built-in Potts-model Hamiltonian, allows for custom objective-function -% matrices). -% -% Inputs: -% W, -% directed/undirected weighted/binary connection matrix with -% positive and possibly negative weights. -% gamma, -% resolution parameter (optional) -% gamma>1, detects smaller modules -% 0<=gamma<1, detects larger modules -% gamma=1, classic modularity (default) -% M0, -% initial community affiliation vector (optional) -% B, -% objective-function type or custom objective matrix (optional) -% 'modularity', modularity (default) -% 'potts', Potts-model Hamiltonian (for binary networks) -% 'negative_sym', symmetric treatment of negative weights -% 'negative_asym', asymmetric treatment of negative weights -% B, custom objective-function matrix -% -% Note: see Rubinov and Sporns (2011) for a discussion of -% symmetric vs. asymmetric treatment of negative weights. -% -% Outputs: -% M, -% community affiliation vector -% Q, -% optimized community-structure statistic (modularity by default) -% -% Example: -% % Iterative community finetuning. -% % W is the input connection matrix. -% n = size(W,1); % number of nodes -% M = 1:n; % initial community affiliations -% Q0 = -1; Q1 = 0; % initialize modularity values -% while Q1-Q0>1e-5; % while modularity increases -% Q0 = Q1; % perform community detection -% [M, Q1] = community_louvain(W, [], M); -% end -% -% References: -% Blondel et al. (2008) J. Stat. Mech. P10008. -% Reichardt and Bornholdt (2006) Phys. Rev. E 74, 016110. -% Ronhovde and Nussinov (2008) Phys. Rev. E 80, 016109 -% Sun et al. (2008) Europhysics Lett 86, 28004. -% Rubinov and Sporns (2011) Neuroimage 56:2068-79. -% -% Mika Rubinov, U Cambridge 2015-2016 - -% Modification history -% 2015: Original -% 2016: Included generalization for negative weights. -% Enforced binary network input for Potts-model Hamiltonian. -% Streamlined code and expanded documentation. - -W=double(W); % convert to double format -n=length(W); % get number of nodes -s=sum(sum(W)); % get sum of edges - -if ~exist('B','var') || isempty(B) - type_B = 'modularity'; -elseif ischar(B) - type_B = B; -else - type_B = 0; - if exist('gamma','var') && ~isempty(gamma) - warning('Value of gamma is ignored in generalized mode.') - end -end -if ~exist('gamma','var') || isempty(gamma) - gamma = 1; -end - -if strcmp(type_B,'negative_sym') || strcmp(type_B,'negative_asym') - W0 = W.*(W>0); %positive weights matrix - s0 = sum(sum(W0)); %weight of positive links - B0 = W0-gamma*(sum(W0,2)*sum(W0,1))/s0; %positive modularity - - W1 =-W.*(W<0); %negative weights matrix - s1 = sum(sum(W1)); %weight of negative links - if s1 %negative modularity - B1 = W1-gamma*(sum(W1,2)*sum(W1,1))/s1; - else - B1 = 0; - end -elseif min(min(W))<-1e-10 - err_string = [ - 'The input connection matrix contains negative weights.\nSpecify ' ... 
- '''negative_sym'' or ''negative_asym'' objective-function types.']; - error(sprintf(err_string)) %#ok -end -if strcmp(type_B,'potts') && any(any(W ~= logical(W))) - error('Potts-model Hamiltonian requires a binary W.') -end - -if type_B - switch type_B - case 'modularity'; B = (W-gamma*(sum(W,2)*sum(W,1))/s)/s; - case 'potts'; B = W-gamma*(~W); - case 'negative_sym'; B = B0/(s0+s1) - B1/(s0+s1); - case 'negative_asym'; B = B0/s0 - B1/(s0+s1); - otherwise; error('Unknown objective function.'); - end -else % custom objective function matrix as input - B = double(B); - if ~isequal(size(W),size(B)) - error('W and B must have the same size.') - end -end -if ~exist('M0','var') || isempty(M0) - M0=1:n; -elseif numel(M0)~=n - error('M0 must contain n elements.') -end - -[~,~,Mb] = unique(M0); -M = Mb; - -B = (B+B.')/2; % symmetrize modularity matrix -Hnm=zeros(n,n); % node-to-module degree -for m=1:max(Mb) % loop over modules - Hnm(:,m)=sum(B(:,Mb==m),2); -end - -Q0 = -inf; -Q = sum(B(bsxfun(@eq,M0,M0.'))); % compute modularity -first_iteration = true; -while Q-Q0>1e-10 - flag = true; % flag for within-hierarchy search - while flag - flag = false; - for u=randperm(n) % loop over all nodes in random order - ma = Mb(u); % current module of u - dQ = Hnm(u,:) - Hnm(u,ma) + B(u,u); - dQ(ma) = 0; % (line above) algorithm condition - - [max_dQ,mb] = max(dQ); % maximal increase in modularity and corresponding module - if max_dQ>1e-10 % if maximal increase is positive - flag = true; - Mb(u) = mb; % reassign module - - Hnm(:,mb) = Hnm(:,mb)+B(:,u); % change node-to-module strengths - Hnm(:,ma) = Hnm(:,ma)-B(:,u); - end - end - end - [~,~,Mb] = unique(Mb); % new module assignments - - M0 = M; - if first_iteration - M=Mb; - first_iteration=false; - else - for u=1:n % loop through initial module assignments - M(M0==u)=Mb(u); % assign new modules - end - end - - n=max(Mb); % new number of modules - B1=zeros(n); % new weighted matrix - for u=1:n - for v=u:n - bm=sum(sum(B(Mb==u,Mb==v))); % pool weights of nodes in same module - B1(u,v)=bm; - B1(v,u)=bm; - end - end - B=B1; - - Mb=1:n; % initial module assignments - Hnm=B; % node-to-module strength - - Q0=Q; - Q=trace(B); % compute modularity -end diff --git a/DefaultData/2019_03_03_BCT/consensus_und.m b/DefaultData/2019_03_03_BCT/consensus_und.m deleted file mode 100755 index 089b1e0..0000000 --- a/DefaultData/2019_03_03_BCT/consensus_und.m +++ /dev/null @@ -1,96 +0,0 @@ -function ciu = consensus_und(d,tau,reps) -% CONSENSUS_UND Consensus clustering -% -% CIU = CONSENSUS(D,TAU,REPS) seeks a consensus partition of the -% agreement matrix D. The algorithm used here is almost identical to the -% one introduced in Lancichinetti & Fortunato (2012): The agreement -% matrix D is thresholded at a level TAU to remove an weak elements. The -% resulting matrix is then partitions REPS number of times using the -% Louvain algorithm (in principle, any clustering algorithm that can -% handle weighted matrixes is a suitable alternative to the Louvain -% algorithm and can be substituted in its place). This clustering -% produces a set of partitions from which a new agreement is built. If -% the partitions have not converged to a single representative partition, -% the above process repeats itself, starting with the newly built -% agreement matrix. -% -% NOTE: In this implementation, the elements of the agreement matrix must -% be converted into probabilities. 
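To make the NOTE above concrete, a sketch of one way to build such a probabilistic agreement matrix from repeated Louvain runs before handing it to consensus_und; the network W, tau and reps are hypothetical choices for illustration only:
W    = rand(60); W = (W + W')/2; W(1:61:end) = 0;   % hypothetical weighted undirected network
reps = 100; tau = 0.5;                              % arbitrary settings for this sketch
ci   = zeros(length(W), reps);
for r = 1:reps
    ci(:, r) = community_louvain(W);                % degenerate partitions across runs
end
D   = agreement(ci) ./ reps;                        % co-assignment probabilities in [0,1]
ciu = consensus_und(D, tau, reps);                  % consensus partition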
-% -% NOTE: This implementation is slightly different from the original -% algorithm proposed by Lanchichinetti & Fortunato. In its original -% version, if the thresholding produces singleton communities, those -% nodes are reconnected to the network. Here, we leave any singleton -% communities disconnected. -% -% Inputs: D, agreement matrix with entries between 0 and 1 -% denoting the probability of finding node i in the -% same cluster as node j -% TAU, threshold which controls the resolution of the -% reclustering -% REPS, number of times that the clustering algorithm is -% reapplied -% -% Outputs: CIU, consensus partition -% -% References: Lancichinetti & Fortunato (2012). Consensus clustering in -% complex networks. Scientific Reports 2, Article number: 336. -% -% Richard Betzel, Indiana University, 2012 -% -% modified on 3/2014 to include "unique_partitions" - -n = length(d); flg = 1; -while flg == 1 - - flg = 0; - dt = d.*(d >= tau).*~eye(n); - if nnz(dt) == 0 - ciu = (1:n)'; - else - ci = zeros(n,reps); - for iter = 1:reps - ci(:,iter) = community_louvain(dt); - end - ci = relabel_partitions(ci); - ciu = unique_partitions(ci); - nu = size(ciu,2); - if nu > 1 - flg = 1; - d = agreement(ci)./reps; - end - end - -end - -function cinew = relabel_partitions(ci) -[n,m] = size(ci); -cinew = zeros(n,m); -for i = 1:m - c = ci(:,i); - d = zeros(size(c)); - count = 0; - while sum(d ~= 0) < n - count = count + 1; - ind = find(c,1,'first'); - tgt = c(ind); - rep = c == tgt; - d(rep) = count; - c(rep) = 0; - end - cinew(:,i) = d; -end - -function ciu = unique_partitions(ci) -ci = relabel_partitions(ci); -ciu = []; -count = 0; -c = 1:size(ci,2); -while ~isempty(ci) - count = count + 1; - tgt = ci(:,1); - ciu = [ciu,tgt]; %#ok - dff = sum(abs(bsxfun(@minus,ci,tgt))) == 0; - ci(:,dff) = []; - c(dff) = []; -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/core_periphery_dir.m b/DefaultData/2019_03_03_BCT/core_periphery_dir.m deleted file mode 100755 index c607be4..0000000 --- a/DefaultData/2019_03_03_BCT/core_periphery_dir.m +++ /dev/null @@ -1,88 +0,0 @@ -function [C, q]=core_periphery_dir(W,gamm,C) -%CORE_PERIPHERY_DIR Core/periphery structure and core-ness statistic -% -% C = core_periphery_dir(W) -% [C,q] = core_periphery_dir(W,gamm,C0) -% -% The optimal core/periphery subdivision is a partition of the network -% into two non-overlapping groups of nodes, a core group and a periphery -% group, in a way that maximizes the number/weight of within core-group -% edges, and minimizes the number/weight of within periphery-group edges. -% -% The core-ness is a statistic which quantifies the goodness of the -% optimal core/periphery subdivision. -% -% Input: W directed (weighted or binary) connection matrix. -% gamma, core-ness resolution parameter (optional) -% gamma>1 detects small core/large periphery -% 0<=gamma<1 detects large core/small periphery -% default is gamma=1 -% -% Outputs: C, binary vector of optimal core structure -% C = 1 represents nodes in the core -% C = 0 represents nodes in the periphery -% q, maximized core-ness statistic -% -% Algorithm: A version of Kernighan-Lin algorithm for graph partitioning -% used in community detection (Newman, 2006) applied to optimize a -% core-structure objective described in Borgatti and Everett (2000). -% -% Reference: Borgatti and Everett (2000) Soc Networks 21:375–395. -% Newman (2006) Phys Rev E 74:036104, PNAS 23:8577-8582. -% Rubinov, Ypma et al. 
(2015) PNAS 112:10032-7 -% -% 2015, Mika Rubinov, U Cambridge - -n = length(W); % number of nodes -W = double(W); % convert from logical -W(1:n+1:end) = 0; % clear diagonal -if ~exist('gamm','var') - gamm = 1; -end -if ~exist('C','var') - C = (rand(1,n)<0.5); -else - C = logical(reshape(C,1,n)); -end - -% Methodological note: cf. community detection, the core-detection -% null model is not corrected for degree (to enable detection of hubs). -s = sum(W(:)); -p = mean(W(:)); -b = W - gamm*p; -B = (b+b.')/(2*s); % directed core-ness matrix -q = sum(sum(B(C,C))) - sum(sum(B(~C,~C))); % core-ness statistic - -f=1; % loop flag -while f - f=0; - Idx = 1:n; % initial node indices - Ct = C; - while any(Idx) - Qt = zeros(1,n); % check swaps of node indices - q0 = sum(sum(B(Ct,Ct))) - sum(sum(B(~Ct,~Ct))); - Qt( Ct) = q0 - 2*sum(B( Ct, :),2); - Qt(~Ct) = q0 + 2*sum(B(~Ct, :),2); - - %%% verification that the above update is equivalent to: - % for u=Idx - % Ct(u) = ~Ct(u); - % Qt(u) = sum(sum(B(Ct,Ct))) - sum(sum(B(~Ct,~Ct))); - % Ct(u) = ~Ct(u); - % end - - max_Qt = max(Qt(Idx)); % make swap with maximal - u = find(abs(Qt(Idx)-max_Qt)<1e-10);% increase in core-ness - u = u(randi(numel(u))); - Ct(Idx(u)) = ~Ct(Idx(u)); - Idx(u)=[]; % remove index from consideration - - if max_Qt-q>1e-10 % recompute core-ness statistic - f = 1; - C = Ct; - q = sum(sum(B(C,C))) - sum(sum(B(~C,~C))); - end - end -end - -q = sum(sum(B(C,C))) - sum(sum(B(~C,~C))); % return core-ness statistic \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/cycprob.m b/DefaultData/2019_03_03_BCT/cycprob.m deleted file mode 100755 index 0202231..0000000 --- a/DefaultData/2019_03_03_BCT/cycprob.m +++ /dev/null @@ -1,45 +0,0 @@ -function [fcyc,pcyc] = cycprob(Pq) -% CYCPROB Cycle probability -% -% [fcyc,pcyc] = cycprob(Pq); -% -% Cycles are paths which begin and end at the same node. Cycle -% probability for path length d, is the fraction of all paths of length -% d-1 that may be extended to form cycles of length d. -% -% Input: Pq, 3D matrix, with Pq(i,j,q) = number of paths from -% 'i' to 'j' of length 'q' (produced by 'findpaths') -% -% Outputs: fcyc, fraction of all paths that are cycles for each path -% length 'q'. -% pcyc, probability that a non-cyclic path of length 'q-1' -% can be extended to form a cycle of length 'q', for -% each path length 'q', -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - - -% Note: fcyc(1) must be zero, as there cannot be cycles of length one. -fcyc = zeros(1,size(Pq,3)); -for q=1:size(Pq,3) - if(sum(sum(Pq(:,:,q)))>0) - fcyc(q) = sum(diag(Pq(:,:,q)))/sum(sum(Pq(:,:,q))); - else - fcyc(q) = 0; - end; -end; - -% Note: pcyc(1) is not defined (set to zero). -% Note: pcyc(2) is equal to the fraction of reciprocal connections, -% 'frecip', delivered by 'reciprocal.m'. -% Note: there are no non-cyclic paths of length N and no cycles of length N+1 -pcyc = zeros(1,size(Pq,3)); -for q=2:size(Pq,3) - if((sum(sum(Pq(:,:,q-1)))-sum(diag(Pq(:,:,q-1))))>0) - pcyc(q) = sum(diag(Pq(:,:,q)))/... 
- (sum(sum(Pq(:,:,q-1)))-sum(diag(Pq(:,:,q-1)))); - else - pcyc(q) = 0; - end; -end; diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/Coactivation_matrix.mat b/DefaultData/2019_03_03_BCT/data_and_demos/Coactivation_matrix.mat deleted file mode 100755 index a01e31b..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/Coactivation_matrix.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/GroupAverage_rsfMRI_matrix.mat b/DefaultData/2019_03_03_BCT/data_and_demos/GroupAverage_rsfMRI_matrix.mat deleted file mode 100755 index 47db07b..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/GroupAverage_rsfMRI_matrix.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/cat.mat b/DefaultData/2019_03_03_BCT/data_and_demos/cat.mat deleted file mode 100755 index b0a7716..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/cat.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/demo_efficiency_measures.m b/DefaultData/2019_03_03_BCT/data_and_demos/demo_efficiency_measures.m deleted file mode 100755 index c6493d2..0000000 --- a/DefaultData/2019_03_03_BCT/data_and_demos/demo_efficiency_measures.m +++ /dev/null @@ -1,116 +0,0 @@ -% This script loads 7 unweighted, undirected adjacency matrices -% corresponding to a clique, chain, ring, 1D lattice, star, rich-club, and -% bi-modular toy networks (all graphs have 50 nodes). The following -% efficiency measures are computed for each graph: -% -% - prob_SPL: probability of one particle traveling through shortest-paths -% - Erout: efficiency of routing -> based on shortest-paths -% - Ediff: efficiency of diffusion -> based on mean-first-passage-times -% - Eres: efficiency of resources -> based on number of particles -% necessary so that at least one particle taking shortest-paths with -% certain probability (lambda). -% -% If you are using this efficiency package for your research, plase kindly -% cite the paper: -% -% "Exploring the Morphospace of Communication Efficiency in Complex -% Networks" Goñi J, Avena-Koenigsberger A, Velez de Mendizabal N, van den -% Heuvel M, Betzel RF and Sporns O. PLoS ONE. 2013 -% -% These examples and results correspond to Table 1 in the paper. -% -% Joaquin Goñi and Andrea Avena-Koenigsberger, IU Bloomington, 2012 - -close all; -clear all; -clc; - -load demo_efficiency_measures_data.mat; % 7 adjacency matrices corresponding to the examples shown in Table 1 are loaded. -lambda = 0.5; % this parameter is an input for the computation of Eres. 
- -% run and display efficiency measures for the 7 graphs -disp([' prob_SPL ',' Erout ',' Ediff ',' Eres ']) - -fprintf('----- clique ----- \n') -adj = clique; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) - -fprintf('----- chain ----- \n') -adj = chain; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) - -fprintf('----- ring ----- \n') -adj = ring; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) - -fprintf('----- lattice1D ----- \n') -adj = lattice1D; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) - -fprintf('----- star ----- \n') -adj = star; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) - -fprintf('----- rich-club ----- \n') -adj = rich_club; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) - -fprintf('----- bi-modular ----- \n') -adj = bi_modular; -N = size(adj,1); -EYE = logical(eye(N,N)); -SPL = distance_wei_floyd(adj); -Erout = rout_efficiency(adj); -Ediff = diffusion_efficiency(adj); -[Eres,prob_SPL] = resource_efficiency_bin(adj,lambda,SPL); -prob_SPL = mean(prob_SPL(~EYE)); -Eres = mean(Eres(~EYE)); -disp([prob_SPL,Erout,Ediff,Eres]) diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/demo_efficiency_measures_data.mat b/DefaultData/2019_03_03_BCT/data_and_demos/demo_efficiency_measures_data.mat deleted file mode 100755 index ca3b307..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/demo_efficiency_measures_data.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_data.mat b/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_data.mat deleted file mode 100755 index cfcdc0a..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_data.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_geometric.m b/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_geometric.m deleted file mode 100755 index 95adf34..0000000 --- a/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_geometric.m +++ /dev/null @@ -1,52 +0,0 @@ -% 
Demonstration of generative model functions. -% -% See GENERATIVE_MODEL and EVALUATE_GENERATIVE_MODEL for further details -% and interpretation. - -clear -close all -clc - -data = load('demo_generative_models_data'); -A = data.A; -Aseed = data.Aseed; -D = data.D; - -% get cardinality of network -n = length(A); - -% set model type -modeltype = 'sptl'; - -% set whether the model is based on powerlaw or exponentials -modelvar = [{'powerlaw'},{'powerlaw'}]; - -% choose some model parameters -nparams = 100; -params = unifrnd(-10,0,nparams,1); - -% generate synthetic networks and energy for the neighbors model; -[B,E,K] = evaluate_generative_model(Aseed,A,D,modeltype,modelvar,params); -X = [E,K]; - -% show scatterplot of parameter values versus energy and KS statistics -names = [... - {'energy'},... - {'degree'},... - {'clustering'},... - {'betweenness'},... - {'edge length'}]; - -f = figure(... - 'units','inches',... - 'position',[2,2,4,4]); -for i = 1:size(X,2) - subplot(3,2,i); - scatter(params,X(:,i),100,X(:,i),'filled'); - set(gca,... - 'ylim',[0,1],... - 'clim',[0,1]); - colormap(jet); - xlabel('geometric parameter, \eta'); - ylabel(names{i}); -end diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_neighbors.m b/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_neighbors.m deleted file mode 100755 index 61074dd..0000000 --- a/DefaultData/2019_03_03_BCT/data_and_demos/demo_generative_models_neighbors.m +++ /dev/null @@ -1,52 +0,0 @@ -% Demonstration of generative model functions. -% -% See GENERATIVE_MODEL and EVALUATE_GENERATIVE_MODEL for further details -% and interpretation. - -clear -close all -clc - -data = load('demo_generative_models_data'); -A = data.A; -Aseed = data.Aseed; -D = data.D; - -% get cardinality of network -n = length(A); - -% set model type -modeltype = 'matching'; - -% set whether the model is based on powerlaw or exponentials -modelvar = [{'powerlaw'},{'powerlaw'}]; - -% choose some model parameters -nparams = 100; -params = [unifrnd(-10,0,nparams,1), unifrnd(-1,1,nparams,1)]; - -% generate synthetic networks and energy for the neighbors model; -[B,E,K] = evaluate_generative_model(Aseed,A,D,modeltype,modelvar,params); -X = [E,K]; - -% show scatterplot of parameter values versus energy and KS statistics -names = [... - {'energy'},... - {'degree'},... - {'clustering'},... - {'betweenness'},... - {'edge length'}]; - -f = figure(... - 'units','inches',... - 'position',[2,2,4,4]); -for i = 1:size(X,2) - subplot(3,2,i); - scatter(params(:,1),params(:,2),100,X(:,i),'filled'); - set(gca,... 
- 'clim',[0,1]); - colormap(jet); - xlabel('geometric parameter, \eta'); - ylabel('topological parameter, \gamma'); - title(names{i}); -end diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/fve30.mat b/DefaultData/2019_03_03_BCT/data_and_demos/fve30.mat deleted file mode 100755 index a6e0b3d..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/fve30.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/fve32.mat b/DefaultData/2019_03_03_BCT/data_and_demos/fve32.mat deleted file mode 100755 index d758330..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/fve32.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/macaque47.mat b/DefaultData/2019_03_03_BCT/data_and_demos/macaque47.mat deleted file mode 100755 index b53368f..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/macaque47.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/macaque71.mat b/DefaultData/2019_03_03_BCT/data_and_demos/macaque71.mat deleted file mode 100755 index 5e8a7c7..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/macaque71.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/data_and_demos/motif34lib.mat b/DefaultData/2019_03_03_BCT/data_and_demos/motif34lib.mat deleted file mode 100755 index fa3c304..0000000 Binary files a/DefaultData/2019_03_03_BCT/data_and_demos/motif34lib.mat and /dev/null differ diff --git a/DefaultData/2019_03_03_BCT/degrees_dir.m b/DefaultData/2019_03_03_BCT/degrees_dir.m deleted file mode 100755 index aa77875..0000000 --- a/DefaultData/2019_03_03_BCT/degrees_dir.m +++ /dev/null @@ -1,31 +0,0 @@ -function [id,od,deg] = degrees_dir(CIJ) -%DEGREES_DIR Indegree and outdegree -% -% [id,od,deg] = degrees_dir(CIJ); -% -% Node degree is the number of links connected to the node. The indegree -% is the number of inward links and the outdegree is the number of -% outward links. -% -% Input: CIJ, directed (binary/weighted) connection matrix -% -% Output: id, node indegree -% od, node outdegree -% deg, node degree (indegree + outdegree) -% -% Notes: Inputs are assumed to be on the columns of the CIJ matrix. -% Weight information is discarded. -% -% -% Olaf Sporns, Indiana University, 2002/2006/2008 - - -% ensure CIJ is binary... -CIJ = double(CIJ~=0); - -% compute degrees -id = sum(CIJ,1); % indegree = column sum of CIJ -od = sum(CIJ,2)'; % outdegree = row sum of CIJ -deg = id+od; % degree = indegree+outdegree - - diff --git a/DefaultData/2019_03_03_BCT/degrees_und.m b/DefaultData/2019_03_03_BCT/degrees_und.m deleted file mode 100755 index 86b91b3..0000000 --- a/DefaultData/2019_03_03_BCT/degrees_und.m +++ /dev/null @@ -1,22 +0,0 @@ -function [deg] = degrees_und(CIJ) -%DEGREES_UND Degree -% -% deg = degrees_und(CIJ); -% -% Node degree is the number of links connected to the node. -% -% Input: CIJ, undirected (binary/weighted) connection matrix -% -% Output: deg, node degree -% -% Note: Weight information is discarded. -% -% -% Olaf Sporns, Indiana University, 2002/2006/2008 - - -% ensure CIJ is binary... 
-CIJ = double(CIJ~=0); - -deg = sum(CIJ); - diff --git a/DefaultData/2019_03_03_BCT/density_dir.m b/DefaultData/2019_03_03_BCT/density_dir.m deleted file mode 100755 index b36d0a7..0000000 --- a/DefaultData/2019_03_03_BCT/density_dir.m +++ /dev/null @@ -1,24 +0,0 @@ -function [kden,N,K] = density_dir(CIJ) -% DENSITY_DIR Density -% -% kden = density_dir(CIJ); -% [kden,N,K] = density_dir(CIJ); -% -% Density is the fraction of present connections to possible connections. -% -% Input: CIJ, directed (weighted/binary) connection matrix -% -% Output: kden, density -% N, number of vertices -% K, number of edges -% -% Notes: Assumes CIJ is directed and has no self-connections. -% Weight information is discarded. -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - -N = size(CIJ,1); -K = nnz(CIJ); -kden = K/(N^2-N); - diff --git a/DefaultData/2019_03_03_BCT/density_und.m b/DefaultData/2019_03_03_BCT/density_und.m deleted file mode 100755 index 87debf4..0000000 --- a/DefaultData/2019_03_03_BCT/density_und.m +++ /dev/null @@ -1,28 +0,0 @@ -function [kden,N,K] = density_und(CIJ) -% DENSITY_UND Density -% -% kden = density_und(CIJ); -% [kden,N,K] = density_und(CIJ); -% -% Density is the fraction of present connections to possible connections. -% -% Input: CIJ, undirected (weighted/binary) connection matrix -% -% Output: kden, density -% N, number of vertices -% K, number of edges -% -% Notes: Assumes CIJ is undirected and has no self-connections. -% Weight information is discarded. -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - - -% Modification history: -% 2009-10: K fixed to sum over one half of CIJ [Tony Herdman, SFU] - -N = size(CIJ,1); -K = nnz(triu(CIJ)); -kden = K/((N^2-N)/2); - diff --git a/DefaultData/2019_03_03_BCT/diffusion_efficiency.m b/DefaultData/2019_03_03_BCT/diffusion_efficiency.m deleted file mode 100755 index 835bb97..0000000 --- a/DefaultData/2019_03_03_BCT/diffusion_efficiency.m +++ /dev/null @@ -1,33 +0,0 @@ -function [GEdiff,Ediff] = diffusion_efficiency(adj) -% DIFFUSION_EFFICIENCY Global mean and pair-wise diffusion efficiency -% -% [GEdiff,Ediff] = diffusion_efficiency(adj); -% -% The diffusion efficiency between nodes i and j is the inverse of the -% mean first passage time from i to j, that is the expected number of -% steps it takes a random walker starting at node i to arrive for the -% first time at node j. Note that the mean first passage time is not a -% symmetric measure -- mfpt(i,j) may be different from mfpt(j,i) -- and -% the pair-wise diffusion efficiency matrix is hence also not symmetric. -% -% -% Input: -% adj, Weighted/Unweighted, directed/undirected adjacency matrix -% -% -% Outputs: -% GEdiff, Mean Global diffusion efficiency (scalar) -% Ediff, Pair-wise diffusion efficiency (matrix) -% -% -% References: Goñi J, et al (2013) PLoS ONE -% -% Joaquin Goñi and Andrea Avena-Koenigsberger, IU Bloomington, 2012 - - -n = size(adj,1); -mfpt = mean_first_passage_time(adj); -Ediff = 1./mfpt; -Ediff(eye(n)>0) = 0; -GEdiff = sum(Ediff(~eye(n)>0))/(n^2-n); - diff --git a/DefaultData/2019_03_03_BCT/distance_bin.m b/DefaultData/2019_03_03_BCT/distance_bin.m deleted file mode 100755 index 5210b78..0000000 --- a/DefaultData/2019_03_03_BCT/distance_bin.m +++ /dev/null @@ -1,45 +0,0 @@ -function D=distance_bin(A) -%DISTANCE_BIN Distance matrix -% -% D = distance_bin(A); -% -% The distance matrix contains lengths of shortest paths between all -% pairs of nodes. An entry (u,v) represents the length of shortest path -% from node u to node v. 
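For the degree, density and diffusion routines quoted above, a compact sketch on a hypothetical directed binary graph, dense enough that it is effectively strongly connected (which the diffusion measure implicitly assumes):
CIJ = double(rand(25) > 0.7); CIJ(1:26:end) = 0;    % hypothetical directed binary graph, no self-connections
[id, od, deg]   = degrees_dir(CIJ);                 % in-, out- and total degree per node
[kden, N, K]    = density_dir(CIJ);                 % density = K/(N^2-N)
[GEdiff, Ediff] = diffusion_efficiency(CIJ);        % global and pair-wise diffusion efficiency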
The average shortest path length is the -% characteristic path length of the network. -% -% Input: A, binary directed/undirected connection matrix -% -% Output: D, distance matrix -% -% Notes: -% Lengths between disconnected nodes are set to Inf. -% Lengths on the main diagonal are set to 0. -% -% Algorithm: Algebraic shortest paths. -% -% -% Mika Rubinov, U Cambridge -% Jonathan Clayden, UCL -% 2007-2013 - -% Modification history: -% 2007: Original (MR) -% 2013: Bug fix, enforce zero distance for self-connections (JC) - -A=double(A~=0); %binarize and convert to double format - -l=1; %path length -Lpath=A; %matrix of paths l -D=A; %distance matrix - -Idx=true; -while any(Idx(:)) - l=l+1; - Lpath=Lpath*A; - Idx=(Lpath~=0)&(D==0); - D(Idx)=l; -end - -D(~D)=inf; %assign inf to disconnected nodes -D(1:length(A)+1:end)=0; %clear diagonal \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/distance_wei.m b/DefaultData/2019_03_03_BCT/distance_wei.m deleted file mode 100755 index 9665051..0000000 --- a/DefaultData/2019_03_03_BCT/distance_wei.m +++ /dev/null @@ -1,71 +0,0 @@ -function [D,B]=distance_wei(L) -% DISTANCE_WEI Distance matrix (Dijkstra's algorithm) -% -% D = distance_wei(L); -% [D,B] = distance_wei(L); -% -% The distance matrix contains lengths of shortest paths between all -% pairs of nodes. An entry (u,v) represents the length of shortest path -% from node u to node v. The average shortest path length is the -% characteristic path length of the network. -% -% Input: L, Directed/undirected connection-length matrix. -% *** NB: The length matrix L isn't the weights matrix W (see below) *** -% -% Output: D, distance (shortest weighted path) matrix -% B, number of edges in shortest weighted path matrix -% -% Notes: -% The input matrix must be a connection-length matrix, typically -% obtained via a mapping from weight to length. For instance, in a -% weighted correlation network higher correlations are more naturally -% interpreted as shorter distances and the input matrix should -% consequently be some inverse of the connectivity matrix. -% The number of edges in shortest weighted paths may in general -% exceed the number of edges in shortest binary paths (i.e. shortest -% paths computed on the binarized connectivity matrix), because shortest -% weighted paths have the minimal weighted distance, but not necessarily -% the minimal number of edges. -% Lengths between disconnected nodes are set to Inf. -% Lengths on the main diagonal are set to 0. -% -% Algorithm: Dijkstra's algorithm. -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2012. -% Rick Betzel and Andrea Avena, IU, 2012 - -%Modification history -%2007: original (MR) -%2009-08-04: min() function vectorized (MR) -%2012: added number of edges in shortest path as additional output (RB/AA) -%2013: variable names changed for consistency with other functions (MR) - -n=length(L); -D=inf(n); -D(1:n+1:end)=0; %distance matrix -B=zeros(n); %number of edges matrix - -for u=1:n - S=true(1,n); %distance permanence (true is temporary) - L1=L; - V=u; - while 1 - S(V)=0; %distance u->V is now permanent - L1(:,V)=0; %no in-edges as already shortest - for v=V - T=find(L1(v,:)); %neighbours of shortest nodes - [d,wi]=min([D(u,T);D(u,v)+L1(v,T)]); - D(u,T)=d; %smallest of old/new path lengths - ind=T(wi==2); %indices of lengthened paths - B(u,ind)=B(u,v)+1; %increment no. 
of edges in lengthened paths - end - - minD=min(D(u,S)); - if isempty(minD)||isinf(minD) %isempty: all nodes reached; - break, %isinf: some nodes cannot be reached - end; - - V=find(D(u,:)==minD); - end -end diff --git a/DefaultData/2019_03_03_BCT/distance_wei_floyd.m b/DefaultData/2019_03_03_BCT/distance_wei_floyd.m deleted file mode 100755 index ac4b5ad..0000000 --- a/DefaultData/2019_03_03_BCT/distance_wei_floyd.m +++ /dev/null @@ -1,121 +0,0 @@ -function [SPL,hops,Pmat] = distance_wei_floyd(D,transform) -% DISTANCE_WEI_FLOYD Distance matrix (Floyd-Warshall algorithm) -% -% [SPL,hops,Pmat] = distance_wei_floyd(D,transform) -% -% Computes the topological length of the shortest possible path -% connecting every pair of nodes in the network. -% -% Inputs: -% -% D, -% Weighted/unweighted directed/undirected -% connection *weight* OR *length* matrix. -% -% transform, -% If the input matrix is a connection *weight* matrix, specify a -% transform that map input connection weights to connection -% lengths. Two transforms are available. -% 'log' -> l_ij = -log(w_ij) -% 'inv' -> l_ij = 1/w_ij -% -% If the input matrix is a connection *length* matrix, do not -% specify a transform (or specify an empty transform argument). -% -% -% Outputs: -% -% SPL, -% Unweighted/Weighted shortest path-length matrix. -% If W is directed matrix, then SPL is not symmetric. -% -% hops, -% Number of edges in the shortest path matrix. If W is -% unweighted, SPL and hops are identical. -% -% Pmat, -% Elements {i,j} of this matrix indicate the next node in the -% shortest path between i and j. This matrix is used as an input -% argument for function 'retrieve_shortest_path.m', which returns -% as output the sequence of nodes comprising the shortest path -% between a given pair of nodes. -% -% -% Notes: -% -% There may be more than one shortest path between any pair of nodes -% in the network. Non-unique shortest paths are termed shortest path -% degeneracies, and are most likely to occur in unweighted networks. -% When the shortest-path is degenerate, The elements of matrix Pmat -% correspond to the first shortest path discovered by the algorithm. -% -% The input matrix may be either a connection weight matrix, or a -% connection length matrix. The connection length matrix is typically -% obtained with a mapping from weight to length, such that higher -% weights are mapped to shorter lengths (see above). -% -% -% Algorithm: Floyd–Warshall Algorithm -% -% -% Andrea Avena-Koenigsberger, IU, 2012 - -% Modification history -% 2016 - included transform variable that maps weights to lengths - -if exist('transform','var') && ~isempty(transform) - - switch transform - - case 'log' - - if any((D<0) & D>1) - error('connection-strengths must be in the interval [0,1) to use the transform -log(w_ij) \n') - else - SPL = -log(D); - end - - case 'inv' - - SPL = 1./D; - - otherwise - - error('Unexpected transform type. Only "log" and "inv" are accepted \n') - end - -else % the input is a connection lengths matrix. 
- SPL = D; - SPL(SPL == 0) = inf; -end - -n=size(D,2); - -if nargout > 1 - flag_find_paths = true; - hops = double(D ~= 0); - Pmat = 1:n; - Pmat = Pmat(ones(n,1),:); -else - flag_find_paths = false; -end - -for k=1:n - i2k_k2j = bsxfun(@plus, SPL(:,k), SPL(k,:)); - - if flag_find_paths - path = bsxfun(@gt, SPL, i2k_k2j); - [i,j] = find(path); - hops(path) = hops(i,k) + hops(k,j)'; - Pmat(path) = Pmat(i,k); - end - - SPL = min(SPL, i2k_k2j); -end - -SPL(eye(n)>0)=0; - -if flag_find_paths - hops(eye(n)>0)=0; - Pmat(eye(n)>0)=0; -end diff --git a/DefaultData/2019_03_03_BCT/diversity_coef_sign.m b/DefaultData/2019_03_03_BCT/diversity_coef_sign.m deleted file mode 100755 index 661dcbc..0000000 --- a/DefaultData/2019_03_03_BCT/diversity_coef_sign.m +++ /dev/null @@ -1,46 +0,0 @@ -function [Hpos,Hneg] = diversity_coef_sign(W, Ci) -%DIVERSITY_COEF_SIGN Shannon-entropy based diversity coefficient -% -% [Hpos Hneg] = diversity_coef_sign(W,Ci); -% -% The Shannon-entropy based diversity coefficient measures the diversity -% of intermodular connections of individual nodes and ranges from 0 to 1. -% -% Inputs: W, undirected connection matrix with positive and -% negative weights -% -% Ci, community affiliation vector -% -% Output: Hpos, diversity coefficient based on positive connections -% Hneg, diversity coefficient based on negative connections -% -% References: Shannon CE (1948) Bell Syst Tech J 27, 379-423. -% Rubinov and Sporns (2011) NeuroImage. -% -% -% 2011-2012, Mika Rubinov, U Cambridge - -% Modification History: -% Mar 2011: Original -% Sep 2012: Fixed treatment of nodes with no negative strength -% (thanks to Alex Fornito and Martin Monti) - - -n = length(W); %number of nodes -m = max(Ci); %number of modules - -Hpos = entropy(W.*(W>0)); -Hneg = entropy(-W.*(W<0)); - - function H = entropy(W_) - S = sum(W_,2); %strength - Snm = zeros(n,m); %node-to-module degree - for i = 1:m %loop over modules - Snm(:,i) = sum(W_(:,Ci==i),2); - end - pnm = Snm ./ S(:,ones(1,m)); - pnm(isnan(pnm)) = 0; - pnm(~pnm) = 1; - H = -sum(pnm.*log(pnm),2)/log(m); - end -end diff --git a/DefaultData/2019_03_03_BCT/edge_betweenness_bin.m b/DefaultData/2019_03_03_BCT/edge_betweenness_bin.m deleted file mode 100755 index d2ebccb..0000000 --- a/DefaultData/2019_03_03_BCT/edge_betweenness_bin.m +++ /dev/null @@ -1,68 +0,0 @@ -function [EBC,BC]=edge_betweenness_bin(G) -%EDGE_BETWEENNESS_BIN Edge betweenness centrality -% -% EBC = edge_betweenness_bin(A); -% [EBC BC] = edge_betweenness_bin(A); -% -% Edge betweenness centrality is the fraction of all shortest paths in -% the network that contain a given edge. Edges with high values of -% betweenness centrality participate in a large number of shortest paths. -% -% Input: A, binary (directed/undirected) connection matrix. -% -% Output: EBC, edge betweenness centrality matrix. -% BC, node betweenness centrality vector. -% -% Note: Betweenness centrality may be normalised to the range [0,1] as -% BC/[(N-1)(N-2)], where N is the number of nodes in the network. -% -% Reference: Brandes (2001) J Math Sociol 25:163-177. 
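% A short usage sketch for diversity_coef_sign above; W and Ci are toy inputs
% (Ci would normally come from a community detection step). Values near 1 mean
% a node's connections are spread evenly across modules, values near 0 mean
% they concentrate within a single module.
W  = [0 0.8 -0.3 0.1; 0.8 0 0.2 -0.4; -0.3 0.2 0 0.7; 0.1 -0.4 0.7 0];
Ci = [1 1 2 2];                           % two modules of two nodes each
[Hpos,Hneg] = diversity_coef_sign(W,Ci);  % one coefficient per node, in [0,1]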
-% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2012 - - -n=length(G); -BC=zeros(n,1); %vertex betweenness -EBC=zeros(n); %edge betweenness - -for u=1:n - D=false(1,n); D(u)=1; %distance from u - NP=zeros(1,n); NP(u)=1; %number of paths from u - P=false(n); %predecessors - Q=zeros(1,n); q=n; %order of non-increasing distance - - Gu=G; - V=u; - while V - Gu(:,V)=0; %remove remaining in-edges - for v=V - Q(q)=v; q=q-1; - W=find(Gu(v,:)); %neighbours of v - for w=W - if D(w) - NP(w)=NP(w)+NP(v); %NP(u->w) sum of old and new - P(w,v)=1; %v is a predecessor - else - D(w)=1; - NP(w)=NP(v); %NP(u->w) = NP of new path - P(w,v)=1; %v is a predecessor - end - end - end - V=find(any(Gu(V,:),1)); - end - if ~all(D) %if some vertices unreachable, - Q(1:q)=find(~D); %...these are first-in-line - end - - DP=zeros(n,1); %dependency - for w=Q(1:n-1) - BC(w)=BC(w)+DP(w); - for v=find(P(w,:)) - DPvw=(1+DP(w)).*NP(v)./NP(w); - DP(v)=DP(v)+DPvw; - EBC(v,w)=EBC(v,w)+DPvw; - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/edge_betweenness_wei.m b/DefaultData/2019_03_03_BCT/edge_betweenness_wei.m deleted file mode 100755 index 86fa9ff..0000000 --- a/DefaultData/2019_03_03_BCT/edge_betweenness_wei.m +++ /dev/null @@ -1,82 +0,0 @@ -function [EBC,BC]=edge_betweenness_wei(G) -%EDGE_BETWEENNESS_WEI Edge betweenness centrality -% -% EBC = edge_betweenness_wei(L); -% [EBC BC] = edge_betweenness_wei(L); -% -% Edge betweenness centrality is the fraction of all shortest paths in -% the network that contain a given edge. Edges with high values of -% betweenness centrality participate in a large number of shortest paths. -% -% Input: L, Directed/undirected connection-length matrix. -% -% Output: EBC, edge betweenness centrality matrix. -% BC, nodal betweenness centrality vector. -% -% Notes: -% The input matrix must be a connection-length matrix, typically -% obtained via a mapping from weight to length. For instance, in a -% weighted correlation network higher correlations are more naturally -% interpreted as shorter distances and the input matrix should -% consequently be some inverse of the connectivity matrix. -% Betweenness centrality may be normalised to the range [0,1] as -% BC/[(N-1)(N-2)], where N is the number of nodes in the network. -% -% Reference: Brandes (2001) J Math Sociol 25:163-177. 
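% A minimal sketch for edge_betweenness_bin above on a toy binary graph; the
% rescaling of BC follows the normalisation note in the header.
A = [0 1 0 0 1; 1 0 1 0 0; 0 1 0 1 0; 0 0 1 0 1; 1 0 0 1 0];   % 5-node ring
[EBC,BC] = edge_betweenness_bin(A);
N = length(A);
BC_norm = BC ./ ((N-1)*(N-2));            % node betweenness rescaled to [0,1]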
-% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2012 - - -n=length(G); -% E=find(G); G(E)=1./G(E); %invert weights -BC=zeros(n,1); %vertex betweenness -EBC=zeros(n); %edge betweenness - -for u=1:n - D=inf(1,n); D(u)=0; %distance from u - NP=zeros(1,n); NP(u)=1; %number of paths from u - S=true(1,n); %distance permanence (true is temporary) - P=false(n); %predecessors - Q=zeros(1,n); q=n; %order of non-increasing distance - - G1=G; - V=u; - while 1 - S(V)=0; %distance u->V is now permanent - G1(:,V)=0; %no in-edges as already shortest - for v=V - Q(q)=v; q=q-1; - W=find(G1(v,:)); %neighbours of v - for w=W - Duw=D(v)+G1(v,w); %path length to be tested - if Duw<D(w) %if new u->w shorter than old - D(w)=Duw; - NP(w)=NP(v); %NP(u->w) = NP of new path - P(w,:)=0; - P(w,v)=1; %v is the only predecessor - elseif Duw==D(w) %if new u->w equal to old - NP(w)=NP(w)+NP(v); %NP(u->w) sum of old and new - P(w,v)=1; %v is also a predecessor - end - end - end - - minD=min(D(S)); - if isempty(minD), break %all nodes reached, or - elseif isinf(minD) %...some cannot be reached: - Q(1:q)=find(isinf(D)); break %...these are first-in-line - end - V=find(D==minD); - end - - DP=zeros(n,1); %dependency - for w=Q(1:n-1) - BC(w)=BC(w)+DP(w); - for v=find(P(w,:)) - DPvw=(1+DP(w)).*NP(v)./NP(w); - DP(v)=DP(v)+DPvw; - EBC(v,w)=EBC(v,w)+DPvw; - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/edge_nei_overlap_bd.m b/DefaultData/2019_03_03_BCT/edge_nei_overlap_bd.m deleted file mode 100755 index 4efa6ad..0000000 --- a/DefaultData/2019_03_03_BCT/edge_nei_overlap_bd.m +++ /dev/null @@ -1,48 +0,0 @@ -function [EC,ec,degij] = edge_nei_overlap_bd(CIJ) -% EDGE_NEI_OVERLAP_BD Overlap amongst neighbors of two adjacent nodes -% -% [EC,ec,degij] = edge_nei_bd(CIJ); -% -% This function determines the neighbors of two nodes that are linked by -% an edge, and then computes their overlap. Connection matrix must be -% binary and directed. Entries of 'EC' that are 'inf' indicate that no -% edge is present. Entries of 'EC' that are 0 denote "local bridges", -% i.e. edges that link completely non-overlapping neighborhoods. Low -% values of EC indicate edges that are "weak ties". -% -% If CIJ is weighted, the weights are ignored. Neighbors of a node can be -% linked by incoming, outgoing, or reciprocal connections. -% -% Inputs: CIJ, directed (binary/weighted) connection matrix -% -% Outputs: EC, edge neighborhood overlap matrix -% ec, edge neighborhood overlap per edge, in vector format -% degij, degrees of node pairs connected by each edge -% -% Reference: -% -% Easley and Kleinberg (2010) Networks, Crowds, and Markets.
-% Cambridge University Press, Chapter 3 -% -% Olaf Sporns, Indiana University, 2012 - -[ik,jk,ck] = find(CIJ); -lel = length(ck); -N = size(CIJ,1); - -[~,~,deg] = degrees_dir(CIJ); - -ec = zeros(1,lel); -degij = zeros(2,lel); -for e=1:lel - neiik = setdiff(union(find(CIJ(ik(e),:)),find(CIJ(:,ik(e))')),[ik(e) jk(e)]); - neijk = setdiff(union(find(CIJ(jk(e),:)),find(CIJ(:,jk(e))')),[ik(e) jk(e)]); - ec(e) = length(intersect(neiik,neijk))/length(union(neiik,neijk)); - degij(:,e) = [deg(ik(e)) deg(jk(e))]; -end; - -ff = find(CIJ); -EC = 1./zeros(N); -EC(ff) = ec; %#ok - - diff --git a/DefaultData/2019_03_03_BCT/edge_nei_overlap_bu.m b/DefaultData/2019_03_03_BCT/edge_nei_overlap_bu.m deleted file mode 100755 index 4aed3ab..0000000 --- a/DefaultData/2019_03_03_BCT/edge_nei_overlap_bu.m +++ /dev/null @@ -1,45 +0,0 @@ -function [EC,ec,degij] = edge_nei_overlap_bu(CIJ) -% EDGE_NEI_OVERLAP_BU Overlap amongst neighbors of two adjacent nodes -% -% [EC,ec,degij] = edge_nei_bu(CIJ); -% -% This function determines the neighbors of two nodes that are linked by -% an edge, and then computes their overlap. Connection matrix must be -% binary and directed. Entries of 'EC' that are 'inf' indicate that no -% edge is present. Entries of 'EC' that are 0 denote "local bridges", i.e. -% edges that link completely non-overlapping neighborhoods. Low values -% of EC indicate edges that are "weak ties". -% -% If CIJ is weighted, the weights are ignored. -% -% Inputs: CIJ, undirected (binary/weighted) connection matrix -% -% Outputs: EC, edge neighborhood overlap matrix -% ec, edge neighborhood overlap per edge, in vector format -% degij, degrees of node pairs connected by each edge -% -% Reference: Easley and Kleinberg (2010) Networks, Crowds, and Markets. -% Cambridge University Press, Chapter 3. -% -% Olaf Sporns, Indiana University, 2012 - -[ik,jk,ck] = find(CIJ); -lel = length(ck); -N = size(CIJ,1); - -[deg] = degrees_und(CIJ); - -ec = zeros(1,lel); -degij = zeros(2,lel); -for e=1:lel - neiik = setdiff(union(find(CIJ(ik(e),:)),find(CIJ(:,ik(e))')),[ik(e) jk(e)]); - neijk = setdiff(union(find(CIJ(jk(e),:)),find(CIJ(:,jk(e))')),[ik(e) jk(e)]); - ec(e) = length(intersect(neiik,neijk))/length(union(neiik,neijk)); - degij(:,e) = [deg(ik(e)) deg(jk(e))]; -end; - -ff = find(CIJ); -EC = 1./zeros(N); -EC(ff) = ec; %#ok - - diff --git a/DefaultData/2019_03_03_BCT/efficiency_bin.m b/DefaultData/2019_03_03_BCT/efficiency_bin.m deleted file mode 100755 index a1eda71..0000000 --- a/DefaultData/2019_03_03_BCT/efficiency_bin.m +++ /dev/null @@ -1,76 +0,0 @@ -function E=efficiency_bin(A,local) -%EFFICIENCY_BIN Global efficiency, local efficiency. -% -% Eglob = efficiency_bin(A); -% Eloc = efficiency_bin(A,1); -% -% The global efficiency is the average of inverse shortest path length, -% and is inversely related to the characteristic path length. -% -% The local efficiency is the global efficiency computed on the -% neighborhood of the node, and is related to the clustering coefficient. -% -% Inputs: A, binary undirected or directed connection matrix -% local, optional argument -% local=0 computes global efficiency (default) -% local=1 computes local efficiency -% -% Output: Eglob, global efficiency (scalar) -% Eloc, local efficiency (vector) -% -% -% Algorithm: algebraic path count -% -% Reference: Latora and Marchiori (2001) Phys Rev Lett 87:198701. -% Fagiolo (2007) Phys Rev E 76:026107. 
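% A brief sketch for edge_nei_overlap_bu above: for every existing edge (i,j),
% EC(i,j) is the overlap (intersection over union) of the two endpoint
% neighbourhoods, so EC of 0 marks a "local bridge" and Inf marks a missing
% edge, as described in the header. The toy graph is illustrative.
A = [0 1 1 0; 1 0 1 0; 1 1 0 1; 0 0 1 0];   % triangle 1-2-3 plus pendant node 4
[EC,ec,degij] = edge_nei_overlap_bu(A);
% Edge 3-4 links non-overlapping neighbourhoods, so EC(3,4) comes out as 0.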
-% Rubinov M, Sporns O (2010) NeuroImage 52:1059-69 -% -% -% Mika Rubinov, U Cambridge -% Jonathan Clayden, UCL -% 2008-2013 - -% Modification history: -% 2008: Original (MR) -% 2013: Bug fix, enforce zero distance for self-connections (JC) -% 2013: Local efficiency generalized to directed networks - -n=length(A); %number of nodes -A(1:n+1:end)=0; %clear diagonal -A=double(A~=0); %enforce double precision - -if exist('local','var') && local %local efficiency - E=zeros(n,1); - for u=1:n - V=find(A(u,:)|A(:,u).'); %neighbors - sa=A(u,V)+A(V,u).'; %symmetrized adjacency vector - e=distance_inv(A(V,V)); %inverse distance matrix - se=e+e.'; %symmetrized inverse distance matrix - numer=sum(sum((sa.'*sa).*se))/2; %numerator - if numer~=0 - denom=sum(sa).^2 - sum(sa.^2); %denominator - E(u)=numer/denom; %local efficiency - end - end -else %global efficiency - e=distance_inv(A); - E=sum(e(:))./(n^2-n); -end - - -function D=distance_inv(A_) -l=1; %path length -Lpath=A_; %matrix of paths l -D=A_; %distance matrix -n_=length(A_); - -Idx=true; -while any(Idx(:)) - l=l+1; - Lpath=Lpath*A_; - Idx=(Lpath~=0)&(D==0); - D(Idx)=l; -end - -D(~D | eye(n_))=inf; %assign inf to disconnected nodes and to diagonal -D=1./D; %invert distance diff --git a/DefaultData/2019_03_03_BCT/efficiency_wei.m b/DefaultData/2019_03_03_BCT/efficiency_wei.m deleted file mode 100755 index 394ff4d..0000000 --- a/DefaultData/2019_03_03_BCT/efficiency_wei.m +++ /dev/null @@ -1,136 +0,0 @@ -function E = efficiency_wei(W, local) -%EFFICIENCY_WEI Global efficiency, local efficiency. -% -% Eglob = efficiency_wei(W); -% Eloc = efficiency_wei(W,2); -% -% The global efficiency is the average of inverse shortest path length, -% and is inversely related to the characteristic path length. -% -% The local efficiency is the global efficiency computed on the -% neighborhood of the node, and is related to the clustering coefficient. -% -% Inputs: W, -% weighted undirected or directed connection matrix -% -% local, -% optional argument -% local=0 computes the global efficiency (default). -% local=1 computes the original version of the local -% efficiency. -% local=2 computes the modified version of the local -% efficiency (recommended, see below). -% -% Output: Eglob, -% global efficiency (scalar) -% Eloc, -% local efficiency (vector) -% -% Notes: -% The efficiency is computed using an auxiliary connection-length -% matrix L, defined as L_ij = 1/W_ij for all nonzero L_ij; This has an -% intuitive interpretation, as higher connection weights intuitively -% correspond to shorter lengths. -% The weighted local efficiency broadly parallels the weighted -% clustering coefficient of Onnela et al. (2005) and distinguishes the -% influence of different paths based on connection weights of the -% corresponding neighbors to the node in question. In other words, a path -% between two neighbors with strong connections to the node in question -% contributes more to the local efficiency than a path between two weakly -% connected neighbors. Note that the original weighted variant of the -% local efficiency (described in Rubinov and Sporns, 2010) is not a -% true generalization of the binary variant, while the modified variant -% (described in Wang et al., 2016) is a true generalization. -% For ease of interpretation of the local efficiency it may be -% advantageous to rescale all weights to lie between 0 and 1. -% -% Algorithm: Dijkstra's algorithm -% -% References: Latora and Marchiori (2001) Phys Rev Lett 87:198701. -% Onnela et al. 
(2005) Phys Rev E 71:065103 -% Fagiolo (2007) Phys Rev E 76:026107. -% Rubinov M, Sporns O (2010) NeuroImage 52:1059-69 -% Wang Y et al. (2016) Neural Comput 21:1-19. -% -% Mika Rubinov, U Cambridge/Janelia HHMI, 2011-2017 - -%Modification history -% 2011: Original (based on efficiency.m and distance_wei.m) -% 2013: Local efficiency generalized to directed networks -% 2017: Added the modified local efficiency and updated documentation. - -n = length(W); % number of nodes -ot = 1 / 3; % one third - -L = W; % connection-length matrix -A = W > 0; % adjacency matrix -L(A) = 1 ./ L(A); -A = double(A); - -if exist('local','var') && local % local efficiency - E = zeros(n, 1); - cbrt_W = W.^ot; - switch local - case 1 - for u = 1:n - V = find(A(u, :) | A(:, u).'); % neighbors - sw = cbrt_W(u, V) + cbrt_W(V, u).'; % symmetrized weights vector - di = distance_inv_wei(L(V, V)); % inverse distance matrix - se = di.^ot + di.'.^ot; % symmetrized inverse distance matrix - numer = (sum(sum((sw.' * sw) .* se)))/2; % numerator - if numer~=0 - sa = A(u, V) + A(V, u).'; % symmetrized adjacency vector - denom = sum(sa).^2 - sum(sa.^2); % denominator - E(u) = numer / denom; % local efficiency - end - end - case 2 - cbrt_L = L.^ot; - for u = 1:n - V = find(A(u, :) | A(:, u).'); % neighbors - sw = cbrt_W(u, V) + cbrt_W(V, u).'; % symmetrized weights vector - di = distance_inv_wei(cbrt_L(V, V)); % inverse distance matrix - se = di + di.'; % symmetrized inverse distance matrix - numer=(sum(sum((sw.' * sw) .* se)))/2; % numerator - if numer~=0 - sa = A(u, V) + A(V, u).'; % symmetrized adjacency vector - denom = sum(sa).^2 - sum(sa.^2); % denominator - E(u) = numer / denom; % local efficiency - end - end - end -else - di = distance_inv_wei(L); - E = sum(di(:)) ./ (n^2 - n); % global efficiency -end - - -function D=distance_inv_wei(W_) - -n_=length(W_); -D=inf(n_); % distance matrix -D(1:n_+1:end)=0; - -for u=1:n_ - S=true(1,n_); % distance permanence (true is temporary) - W1_=W_; - V=u; - while 1 - S(V)=0; % distance u->V is now permanent - W1_(:,V)=0; % no in-edges as already shortest - for v=V - T=find(W1_(v,:)); % neighbours of shortest nodes - D(u,T)=min([D(u,T);D(u,v)+W1_(v,T)]); % smallest of old/new path lengths - end - - minD=min(D(u,S)); - if isempty(minD)||isinf(minD) % isempty: all nodes reached; - break, % isinf: some nodes cannot be reached - end; - - V=find(D(u,:)==minD); - end -end - -D=1./D; % invert distance -D(1:n_+1:end)=0; diff --git a/DefaultData/2019_03_03_BCT/eigenvector_centrality_und.m b/DefaultData/2019_03_03_BCT/eigenvector_centrality_und.m deleted file mode 100755 index ce05698..0000000 --- a/DefaultData/2019_03_03_BCT/eigenvector_centrality_und.m +++ /dev/null @@ -1,37 +0,0 @@ -function v = eigenvector_centrality_und(CIJ) -%EIGENVECTOR_CENTRALITY_UND Spectral measure of centrality -% -% v = eigenvector_centrality_und(CIJ) -% -% Eigenector centrality is a self-referential measure of centrality: -% nodes have high eigenvector centrality if they connect to other nodes -% that have high eigenvector centrality. The eigenvector centrality of -% node i is equivalent to the ith element in the eigenvector -% corresponding to the largest eigenvalue of the adjacency matrix. -% -% Inputs: CIJ, binary/weighted undirected adjacency matrix. -% -% Outputs: v, eigenvector associated with the largest -% eigenvalue of the adjacency matrix CIJ. -% -% Reference: Newman, MEJ (2002). The mathematics of networks. 
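% A short usage sketch for the two efficiency routines above; the random toy
% graph is illustrative. Global efficiency is a scalar, and the optional flag
% returns nodewise local efficiency (local = 2 is the modified weighted variant
% recommended in the header).
A = double(rand(10) > 0.6);  A = triu(A,1);  A = A + A';    % random binary graph
Eglob = efficiency_bin(A);          % scalar: average inverse shortest path length
Eloc  = efficiency_bin(A,1);        % 10x1 vector of local efficiencies
W = A .* rand(10);  W = triu(W,1);  W = W + W';             % random weights in (0,1)
EglobW = efficiency_wei(W);
ElocW  = efficiency_wei(W,2);       % modified local variant (Wang et al. 2016)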
-% -% Contributors: -% Xi-Nian Zuo, Chinese Academy of Sciences, 2010 -% Rick Betzel, Indiana University, 2012 -% Mika Rubinov, University of Cambridge, 2015 - -% MODIFICATION HISTORY -% 2010/2012: original (XNZ, RB) -% 2015: ensure the use of leading eigenvector (MR) - - -n = length(CIJ); -if n < 1000 - [V,D] = eig(CIJ); -else - [V,D] = eigs(sparse(CIJ)); -end -[~,idx] = max(diag(D)); -ec = abs(V(:,idx)); -v = reshape(ec, length(ec), 1); \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/erange.m b/DefaultData/2019_03_03_BCT/erange.m deleted file mode 100755 index 7501416..0000000 --- a/DefaultData/2019_03_03_BCT/erange.m +++ /dev/null @@ -1,45 +0,0 @@ -function [Erange,eta,Eshort,fs] = erange(CIJ) -%ERANGE Shortcuts -% -% [Erange,eta,Eshort,fs] = erange(CIJ); -% -% Shorcuts are central edges which significantly reduce the -% characteristic path length in the network. -% -% Input: CIJ, binary directed connection matrix -% -% Outputs: Erange, range for each edge, i.e. the length of the -% shortest path from i to j for edge c(i,j) AFTER -% the edge has been removed from the graph. -% eta average range for entire graph. -% Eshort entries are ones for shortcut edges. -% fs fraction of shortcuts in the graph. -% -% Follows the treatment of 'shortcuts' by Duncan Watts -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - - -N = size(CIJ,1); -K = length(nonzeros(CIJ)); -Erange = zeros(N,N); -[i,j] = find(CIJ==1); - -for c=1:length(i) - CIJcut = CIJ; - CIJcut(i(c),j(c)) = 0; - [~, D] = reachdist(CIJcut); - Erange(i(c),j(c)) = D(i(c),j(c)); -end; - -% average range (ignore Inf) -eta = sum(Erange((Erange>0)&(Erange<Inf)))./length(Erange((Erange>0)&(Erange<Inf))); - -% If Erange(i,j) > 2, then the edge is a shortcut. -% 'fshort' is the fraction of shortcuts over the entire graph. - -Eshort = Erange>2; -fs = length(nonzeros(Eshort))/K; \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/evaluate_generative_model.m b/DefaultData/2019_03_03_BCT/evaluate_generative_model.m deleted file mode 100755 index 50f1476..0000000 --- a/DefaultData/2019_03_03_BCT/evaluate_generative_model.m +++ /dev/null @@ -1,100 +0,0 @@ -function [B,E,K] = evaluate_generative_model(A,Atgt,D,modeltype,modelvar,params) -% EVALUATE_GENERATIVE_MODEL Generation and evaluation of synthetic networks -% -% [B,E,K] = EVALUATE_GENERATIVE_MODEL(A,Atgt,D,m,modeltype,modelvar,params) -% -% Generates synthetic networks and evaluates their energy function (see -% below) using the models described in the study by Betzel et al (2016) -% in Neuroimage. -% -% Inputs: -% A, binary network of seed connections -% Atgt, binary network against which synthetic networks are -% compared -% D, Euclidean distance/fiber length matrix -% m, number of connections that should be present in -% final synthetic network -% modeltype, specifies the generative rule (see below) -% modelvar, specifies whether the generative rules are based on -% power-law or exponential relationship -% ({'powerlaw'}|{'exponential}) -% params, either a vector (in the case of the geometric -% model) or a matrix (for all other models) of -% parameters at which the model should be evaluated. -% -% Outputs: -% B, m x number of networks matrix of connections -% E, energy for each synthetic network -% K, Kolmogorov-Smirnov statistics for each synthetic -% network. -% -% Full list of model types: -% (each model type realizes a different generative rule) -% -% 1. 'sptl' spatial model -% 2. 'neighbors' number of common neighbors -% 3. 'matching' matching index -% 4. 'clu-avg' average clustering coeff. -% 5.
'clu-min' minimum clustering coeff. -% 6. 'clu-max' maximum clustering coeff. -% 7. 'clu-diff' difference in clustering coeff. -% 8. 'clu-prod' product of clustering coeff. -% 9. 'deg-avg' average degree -% 10. 'deg-min' minimum degree -% 11. 'deg-max' maximum degree -% 12. 'deg-diff' difference in degree -% 13. 'deg-prod' product of degree -% -% Note: Energy is calculated in exactly the same way as in Betzel et -% al (2016). There are four components to the energy are KS statistics -% comparing degree, clustering coefficient, betweenness centrality, and -% edge length distributions. Energy is calculated as the maximum across -% all four statistics. -% -% Reference: Betzel et al (2016) Neuroimage 124:1054-64. -% -% Richard Betzel, Indiana University/University of Pennsylvania, 2015 - -m = nnz(Atgt)/2; -n = length(Atgt); -x = cell(4,1); -x{1} = sum(Atgt,2); -x{2} = clustering_coef_bu(Atgt); -x{3} = betweenness_bin(Atgt)'; -x{4} = D(triu(Atgt,1) > 0); - -B = generative_model(A,D,m,modeltype,modelvar,params); -nB = size(B,2); - -K = zeros(nB,4); -for iB = 1:nB - b = zeros(n); - b(B(:,iB)) = 1; - b = b + b'; - y = cell(4,1); - y{1} = sum(b,2); - y{2} = clustering_coef_bu(b); - y{3} = betweenness_bin(b)'; - y{4} = D(triu(b,1) > 0); - for j = 1:4 - K(iB,j) = fcn_ks(x{j},y{j}); - end -end -E = max(K,[],2); - - -function kstat = fcn_ks(x1,x2) -binEdges = [-inf ; sort([x1;x2]) ; inf]; - -binCounts1 = histc (x1 , binEdges, 1); -binCounts2 = histc (x2 , binEdges, 1); - -sumCounts1 = cumsum(binCounts1)./sum(binCounts1); -sumCounts2 = cumsum(binCounts2)./sum(binCounts2); - -sampleCDF1 = sumCounts1(1:end-1); -sampleCDF2 = sumCounts2(1:end-1); - -deltaCDF = abs(sampleCDF1 - sampleCDF2); -kstat = max(deltaCDF); - diff --git a/DefaultData/2019_03_03_BCT/find_motif34.m b/DefaultData/2019_03_03_BCT/find_motif34.m deleted file mode 100755 index 18550da..0000000 --- a/DefaultData/2019_03_03_BCT/find_motif34.m +++ /dev/null @@ -1,50 +0,0 @@ -function M=find_motif34(m,n) -%FIND_MOTIF34 Motif legend -% -% Motif_matrices = find_motif34(Motif_id,Motif_class); -% Motif_id = find_motif34(Motif_matrix); -% -% This function returns all motif isomorphs for a given motif id and -% class (3 or 4). The function also returns the motif id for a given -% motif matrix -% -% 1. Input: Motif_id, e.g. 1 to 13, if class is 3 -% Motif_class, number of nodes, 3 or 4. -% -% Output: Motif_matrices, all isomorphs for the given motif -% -% 2. Input: Motif_matrix e.g. [0 1 0; 0 0 1; 1 0 0] -% -% Output Motif_id e.g. 1 to 13, if class is 3 -% -% -%Mika Rubinov, UNSW, 2007-2008 - -persistent M3 ID3 M4 ID4 - -if isscalar(m) - if n==3 - if isempty(ID3) - load motif34lib M3 ID3; - end - ind=find(ID3==m).'; - M=zeros(3,3,length(ind)); - for i=1:length(ind) - M(:,:,i)=reshape([0 M3(ind(i),1:3) 0 ... - M3(ind(i),4:6) 0],3,3); - end - elseif n==4 - if isempty(ID4) - load motif34lib M4 ID4; - end - ind=find(ID4==m).'; - M=zeros(4,4,length(ind)); - for i=1:length(ind) - M(:,:,i)=reshape([0 M4(ind(i),1:4) 0 ... 
- M4(ind(i),5:8) 0 M4(ind(i),9:12) 0],4,4); - end - end -else - n=size(m,1); - M=eval(['find(motif' int2str(n) 'struct_bin(m))']); -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/findpaths.m b/DefaultData/2019_03_03_BCT/findpaths.m deleted file mode 100755 index c232409..0000000 --- a/DefaultData/2019_03_03_BCT/findpaths.m +++ /dev/null @@ -1,158 +0,0 @@ -function [Pq,tpath,plq,qstop,allpths,util] = findpaths(CIJ,sources,qmax,savepths) -%FINDPATHS Network paths -% -% [Pq,tpath,plq,qstop,allpths,util] = findpaths(CIJ,sources,qmax,savepths); -% -% Paths are sequences of linked nodes, that never visit a single node -% more than once. This function finds all paths that start at a set of -% source nodes, up to a specified length. Warning: very memory-intensive. -% -% Inputs: CIJ, binary (directed/undirected) connection matrix -% qmax, maximal path length -% sources, source units from which paths are grown -% savepths, set to 1 if all paths are to be collected in -% 'allpths' -% -% Outputs: Pq, 3D matrix, with P(i,j,q) = number of paths from -% 'i' to 'j' of length 'q'. -% tpath, total number of paths found (lengths 1 to 'qmax') -% plq, path length distribution as a function of 'q' -% qstop, path length at which 'findpaths' is stopped -% allpths, a matrix containing all paths up to 'qmax' -% util, node use index -% -% Note that Pq(:,:,N) can only carry entries on the diagonal, as all -% "legal" paths of length N-1 must terminate. Cycles of length N are -% possible, with all vertices visited exactly once (except for source and -% target). 'qmax = N' can wreak havoc (due to memory problems). -% -% Note: Weights are discarded. -% Note: I am certain that this algorithm is rather inefficient - -% suggestions for improvements are welcome. -% -% Olaf Sporns, Indiana University, 2002/2007/2008/2010 - -% 2010 version: -% -- a bug affecting the calculation of 'util' was fixed -- thanks to -% Steve Williams -% -- better pre-allocation for 'npths' -% -- note that this code assumes a directed graph as input - calculation -% of paths and 'util' indices can be easily adapted to undirected -% graphs. - -% ensure CIJ is binary... -CIJ = double(CIJ~=0); - -% initialize some variables -N = size(CIJ,1); K = sum(sum(CIJ)); -pths = []; -Pq = zeros(N,N,qmax); -util = zeros(N,qmax); - -% this code is for pathlength = 1 -% paths are seeded from 'sources' -q = 1; -for j=1:N - for i=1:length(sources) - is = sources(i); - if (CIJ(is,j) == 1) - pths = [pths [is j]']; - end; - end; -end; - -% calculate the use index per vertex (for paths of length 1) -util(1:N,q) = util(1:N,q)+hist(reshape(pths,1,size(pths,1)*size(pths,2)),1:N)'; -% now enter the found paths of length 1 into the pathmatrix Pq -for np=1:size(pths,2) - Pq(pths(1,np),pths(q+1,np),q) = Pq(pths(1,np),pths(q+1,np),q) + 1; -end; - -% begin saving all paths -if (savepths==1) - allpths = pths; -end; -if (savepths~=1) - allpths = []; -end; - -% initialize -npthscnt = K; - -% "big loop" for all other pathlengths 'q' -% ---------------------------------------------------------------------- -for q=2:qmax - - % to keep track of time... 
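% A usage sketch for find_motif34 above, following the two call patterns given
% in its header; this assumes the motif34lib.mat file shipped with the toolbox
% is on the path (the function loads M3/ID3 and M4/ID4 from it) and, for the
% reverse lookup, the motif3struct_bin helper it dispatches to via eval.
M_iso = find_motif34(1,3);                     % all 3-node isomorphs of motif id 1
m_id  = find_motif34([0 1 0; 0 0 1; 1 0 0]);   % reverse lookup: motif matrix -> id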
- disp(['current pathlength (q) = ',num2str(q),' number of paths so far (up to q-1)= ',num2str(sum(sum(sum(Pq))))]) - - % old paths are now in 'pths' - % new paths are about to be collected in 'npths' - % estimate needed allocation for new paths - len_npths = min(ceil(1.1*npthscnt*K/N),100000000); - npths = zeros(q+1,len_npths); - - % find the unique set of endpoints of 'pths' - endp = unique(pths(q,:)); - npthscnt = 0; - - for ii=1:length(endp) % set of endpoints of previous paths - i = endp(ii); - % in 'pb' collect all previous paths with 'i' as their endpoint - [pa,pb] = find(pths(q,:) == i); - % find the outgoing connections from 'i' ("breadth-first") - nendp = find(CIJ(i,:)==1); - % if 'i' is not a dead end - if (~isempty(nendp)) - for jj=1:length(nendp) % endpoints of next edge - j = nendp(jj); - % find new paths - only "legal" ones, i.e. no vertex is visited twice - pb_temp = pb(sum(j==pths(2:q,pb),1)==0); - % add new paths to 'npths' - npths(:,npthscnt+1:npthscnt+length(pb_temp)) = [pths(:,pb_temp)' ones(length(pb_temp),1)*j]'; - npthscnt = npthscnt+length(pb_temp); - % count new paths and add the number to 'P' - Pq(1:N,j,q) = Pq(1:N,j,q)+(hist(pths(1,pb_temp),1:N))'; - end; - end; - end; - - % note: 'npths' now contains a list of all the paths of length 'q' - if (len_npths>npthscnt) - npths = npths(:,1:npthscnt); - end; - - % append the matrix of all paths - if (savepths==1) - allpths = [allpths; zeros(1,size(allpths,2))]; - allpths = [allpths npths(:,1:npthscnt)]; - end; - - % calculate the use index per vertex (correct for cycles, count - % source/target only once) - util(1:N,q) = util(1:N,q) + hist(reshape(npths(:,1:npthscnt),1,size(npths,1)*npthscnt),1:N)' - diag(Pq(:,:,q)); - % eliminate cycles from "making it" to the next level, so that - % 'pths' contains all the paths that have a chance of being continued - if (~isempty(npths)) - pths = npths(:,npths(1,:)~=npths(q+1,:)); - else - pths = []; - end; - - % if there are no 'pths' paths left, end the search - if (isempty(pths)) - qstop = q; - tpath = sum(sum(sum(Pq))); - plq = reshape(sum(sum(Pq)),1,qmax); - return; - end; - -end; % q -% ---------------------------------------------------------------------- -qstop = q; - -% total number of paths -tpath = sum(sum(sum(Pq))); - -% path length distribution -plq = reshape(sum(sum(Pq)),1,qmax); diff --git a/DefaultData/2019_03_03_BCT/findwalks.m b/DefaultData/2019_03_03_BCT/findwalks.m deleted file mode 100755 index 3efd4b5..0000000 --- a/DefaultData/2019_03_03_BCT/findwalks.m +++ /dev/null @@ -1,41 +0,0 @@ -function [Wq,twalk,wlq] = findwalks(CIJ) -%FINDWALKS Network walks -% -% [Wq,twalk,wlq] = findwalks(CIJ); -% -% Walks are sequences of linked nodes, that may visit a single node more -% than once. This function finds the number of walks of a given length, -% between any two nodes. -% -% Input: CIJ binary (directed/undirected) connection matrix -% -% Outputs: Wq 3D matrix, Wq(i,j,q) is the number of walks -% from 'i' to 'j' of length 'q'. -% twalk total number of walks found -% wlq walk length distribution as function of 'q' -% -% Notes: Wq grows very quickly for larger N,K,q. Weights are discarded. -% -% Algorithm: algebraic path count -% -% -% Olaf Sporns, Indiana University, 2002/2007/2008 - -% ensure CIJ is binary... 
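% A minimal call sketch for findpaths above; keep qmax small on toy examples,
% since memory use grows quickly with path length (as the header warns).
CIJ = [0 1 0 0; 0 0 1 0; 0 0 0 1; 1 0 0 0];        % 4-node directed cycle
sources = 1:4;                                     % grow paths from every node
[Pq,tpath,plq,qstop] = findpaths(CIJ,sources,3,0); % count all paths up to length 3
% Pq(i,j,q) counts paths of length q from i to j; plq is the path-length
% distribution over q = 1..3.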
-CIJ = double(CIJ~=0); - -N = size(CIJ,1); -Wq = zeros(N,N,N); -CIJpwr = CIJ; -Wq(:,:,1) = CIJ; -for q=2:N - CIJpwr = CIJpwr*CIJ; - Wq(:,:,q) = CIJpwr; -end; - -% total number of walks -twalk = sum(sum(sum(Wq))); - -% walk length distribution -wlq = reshape(sum(sum(Wq)),1,N); - diff --git a/DefaultData/2019_03_03_BCT/flow_coef_bd.m b/DefaultData/2019_03_03_BCT/flow_coef_bd.m deleted file mode 100755 index 997e482..0000000 --- a/DefaultData/2019_03_03_BCT/flow_coef_bd.m +++ /dev/null @@ -1,51 +0,0 @@ -function [fc,FC,total_flo] = flow_coef_bd(CIJ) -%FLOW_COEF_BD Node-wise flow coefficients -% -% [hc,HC,total_flo] = flow_coef_bd(CIJ) -% -% Computes the flow coefficient for each node and averaged over the -% network, as described in Honey et al. (2007) PNAS. The flow coefficient -% is similar to betweenness centrality, but works on a local -% neighborhood. It is mathematically related to the clustering -% coefficient (cc) at each node as, fc+cc <= 1. -% -% input: CIJ, connection/adjacency matrix (binary, directed) -% output: fc, flow coefficient for each node -% FC, average flow coefficient over the network -% total_flo, number of paths that "flow" across the central node -% -% Reference: Honey et al. (2007) Proc Natl Acad Sci U S A -% -% Olaf Sporns, Indiana University, 2007/2010/2012 - -N = size(CIJ,1); - -% initialize ... -fc = zeros(1,N); -total_flo = fc; -max_flo = fc; - -% loop over nodes -for v=1:N - % find neighbors - note: treats incoming and outgoing connections as equal - [nb] = find(CIJ(v,:) + CIJ(:,v)'); - fc(v) = 0; - if (~isempty(nb)) - CIJflo = -CIJ(nb,nb); - for i=1:length(nb) - for j=1:length(nb) - if((CIJ(nb(i),v))==1)&&(CIJ(v,nb(j))==1) - CIJflo(i,j) = CIJflo(i,j) + 1; - end; - end; - end; - total_flo(v) = sum(sum(double(CIJflo==1).*~eye(length(nb)))); - max_flo(v) = length(nb)^2-length(nb); - fc(v) = total_flo(v)/max_flo(v); - end; -end; - -% handle nodes that are NaNs -fc(isnan(fc)) = 0; - -FC = mean(fc); diff --git a/DefaultData/2019_03_03_BCT/gateway_coef_sign.m b/DefaultData/2019_03_03_BCT/gateway_coef_sign.m deleted file mode 100755 index 2d6a02d..0000000 --- a/DefaultData/2019_03_03_BCT/gateway_coef_sign.m +++ /dev/null @@ -1,83 +0,0 @@ -function [GWpos,GWneg] = gateway_coef_sign(W,Ci,centtype) - -% Gateway coefficient -% -% [Gpos,Gneg] = gateway_coef_sign(W,Ci,centtype); -% -% Gateway coefficient is a variant of participation coefficient. Similar -% to participation coefficient, gateway coefficient measures the -% diversity of intermodular connections of individual nodes, but this is -% weighted by how critical these connections are to intermodular -% connectivity (e.g., if a node is the only connection between it's -% module and another module, it will have a higher gateway coefficient). -% -% Inputs: W, undirected connection matrix with positive and -% negative weights -% -% Ci, community affiliation vector -% -% centtype, centrality measure to use -% 1 = Node Strength -% 2 = Betweenness Centrality -% -% Output: Gpos, gateway coefficient for positive weights -% Gneg, gateway coefficient for negative weights -% -% Reference: Vargas ER, Wahl LM. Eur Phys J B (2014) 87:1-10. 
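% A small usage sketch for gateway_coef_sign above, using node strength as the
% centrality measure (centtype = 1); W and Ci are toy inputs, with Ci the
% community affiliation vector as in diversity_coef_sign earlier in this set.
W  = [0 0.6 -0.2 0.3; 0.6 0 0.1 -0.5; -0.2 0.1 0 0.8; 0.3 -0.5 0.8 0];
Ci = [1 1 2 2];
[GWpos,GWneg] = gateway_coef_sign(W,Ci,1);   % gateway coefficient per node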
-% -% Jeff Spielberg, University of Delaware - -% Modification History: -% May 2015: Original (originally adapted from participation_coef_sign.m) -% July 2018: Bugfix, change in how weighted matrices are handled, -% improvements for efficiency, additional line documentation - -[~,~,Ci] = unique(Ci); % Remap module indices to consecutive numbers -n = length(W); % Number of nodes -W(1:(n+1):end) = 0; % Ensure diagonal is zero -GWpos = gcoef(W.*(W>0)); % Compute gateway coefficient for positive weights -GWneg = gcoef(-W.*(W<0)); % Compute gateway coefficient for negative weights - - function GW = gcoef(W_) - k = sum(W_,2); % Compute node strength - Gc = (W_~=0)*diag(Ci); % Create neighbor community affiliation matrix - nmod = max(Ci); % Find # of modules - ks = zeros(n,nmod); % Preallocate space - kjs = zeros(n,nmod); % Preallocate space - cs = zeros(n,nmod); % Preallocate space - switch centtype % Which centrality measure to use? - case 1 % Node Strength - cent = sum(W_,2); - case 2 % Betweenness Centrality - L = weight_conversion(W_,'lengths'); - cent = betweenness_wei(L); - end - mcn = 0; % Set max summed centrality per module to 0 - for i = 1:nmod % For each module - if sum(cent(Ci==i))>mcn % If current module has a higher sum - mcn = sum(cent(Ci==i)); % Reassign value - end - ks(:,i) = sum(W_.*(Gc==i),2); % Compute the total weight of the connections per node to each module - end - for i = 1:nmod % For each module - if sum(Ci==i)>1 % If there is more than 1 node in a module - kjs(Ci==i,:) = ones(sum(Ci==i),1)*sum(ks(Ci==i,:)); % Compute total module-module connections - kjs(Ci==i,i) = kjs(Ci==i,i)/2; % Account for redundancy due to double counting within-network work weights - end - end - for i = 1:n % For each node - if k(i)>0 % If node is connected - for ii = 1:nmod % For each module - cs(i,ii) = sum(cent((Ci.*(W_(:,i)>0))==ii)); % Sum of centralities of neighbors of a node within a module - end - end - end - ksm = ks./kjs; % Normalize by total connections - ksm(kjs==0) = 0; % Account for division by 0 - csm = cs./mcn; % Normalize by max summed centrality - gs = (1-(ksm.*csm)).^2; % Calculate total weighting - GW = 1-sum((ks.^2)./(k.^2).*gs,2); % Compute gateway coefficient - GW(isnan(GW)) = 0; % Account for division by 0 - GW(~GW) = 0; % Set to 0 if no neighbors - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/generate_fc.m b/DefaultData/2019_03_03_BCT/generate_fc.m deleted file mode 100755 index 98b74a7..0000000 --- a/DefaultData/2019_03_03_BCT/generate_fc.m +++ /dev/null @@ -1,241 +0,0 @@ -function [FCpre,pred_data,Fcorr] = generate_fc(SC,beta,ED,pred_var,model,FC) -% GENERATE_FC Generation of synthetic functional connectivity matrices -% -% [FCpre,pred_data,Fcorr] = generate_fc(SC,beta,ED,{'SPLwei_log','SIwei_log'},FC) -% [FCpre,pred_data] = generate_fc(SC,beta,[],{'SPLwei_log','SIwei_log'}) -% -% Uses a vector beta of regression coefficients from the model -% FC = pred_var*beta to predict FC. pred_var are structural-based network -% measures derived from the structural connectivity network. -% -% Inputs: -% -% SC, -% Weighted/unweighted undirected NxN Structural Connectivity matrix. -% -% beta, -% Regression coefficients (vector). These may be obtained as an -% output parameter from function predict_fc.m -% -% ED, -% Euclidean distance matrix or upper triangular vector of the -% matrix (optional) -% -% pred_var, -% Set of M predictors. These can be given as an KxM array where -% K = ((N*(N-1))/2) and M is the number of predictors. 
-% Alternatively, pred_var can be a cell with the names of network -% measures to be used as predictors. Accepted network measure -% names are: -% SPLbin - Shortest-path length (binary) -% SPLwei_inv - Shortest-path length computed with an inv transform -% SPLwei_log - Shortest-path length computed with a log transform -% SPLdist - Shortest-path length computed with no transform -% SIbin - Search Information of binary shortest-paths -% SIwei_inv - Search Information of shortest-paths computed with an inv transform -% SIwei_log - Search Information of shortest-paths computed with a log transform -% SIdist - Search Information of shortest-paths computed with no transform -% T - Path Transitivity -% deltaMFPT - Column-wise z-scored mean first passage time -% neighOverlap - Neighborhood Overlap -% MI - Matching Index -% -% Predictors must be specified in the order that matches the -% given beta values. -% -% model, -% Specifies the order of the regression model used within -% matlab's function regstats.m. 'model' can be any option -% accepted by matlab's regstats.m function (e.g.'linear', -% 'interaction' 'quadratic', etc). If no model is specified, -% 'linear' is the default. -% -% FC, -% Functional connections. FC can be a NxN symmetric matrix or a -% ((N*(N-1))/2) x 1 vector containing the upper triangular -% elements of the square FC matrix (excluding diagonal elements). -% This argument is optional and only used to compute the -% correlation between the predicted FC and empirical FC. -% -% -% Outputs: -% -% FCpre, -% Predicted NxN Functional Connectivity matrix -% -% pred_data, -% KxM array of predictors. -% -% FCcorr, -% Pearson Correlation between FCpred and FC -% -% -% Reference: Goñi et al. (2014) PNAS 111: 833–838 -% -% -% Andrea Avena-Koenigsberger, Joaquin Goñi and Olaf Sporns; IU Bloomington, 2016 - - -[b1,b2] = size(beta); -if b1 == 1 && b2 >= b1 - beta = beta'; % beta must be a column vector -elseif b1 > 1 && b2 > 1 - error('beta must be a vector of scalar regression coefficients') -end - -pred_names = {'SPLbin','SPLwei_inv','SPLwei_log','SPLdist','SIbin',... 
- 'SIwei_inv','SIwei_log','SIdist','T','deltaMFPT','neighOverlap','MI'}; - -% select model -if ~exist('model','var') || isempty(model) - model = 'linear'; -end - -N = size(SC,1); -indx = find(triu(ones(N),1)); - -if ~exist('pred_var','var') && ~isempty(ED) - pred_var = {'ED','SPLwei_log','SI','T'}; - flag_var_names = true; - flag_ED = true; -elseif ~exist('pred_var','var') && isempty(ED) - pred_var = {'SPLwei_log','SI','T'}; - flag_var_names = true; -elseif exist('pred_var','var') && ~isnumeric(pred_var) && ~isempty(ED) - flag_var_names = true; - flag_ED = true; -elseif exist('pred_var','var') && ~isnumeric(pred_var) && isempty(ED) - flag_var_names = true; - flag_ED = false; -elseif exist('pred_var','var') && isnumeric(pred_var) && ~isempty(ED) - flag_var_names = false; - flag_ED = true; -elseif exist('pred_var','var') && isnumeric(pred_var) && isempty(ED) - flag_var_names = false; - flag_ED = false; -else - err_str = '"pred_var" must be an KxM array of M predictors, or any of the following graph-measure names:'; - s1 = sprintf('SPLbin - Shortest-path length (binary) \n'); - s2 = sprintf('SPLwei_inv - Shortest-path length computed with an inv transform \n'); - s3 = sprintf('SPLwei_log - Shortest-path length computed with a log transform \n'); - s4 = sprintf('SPLdist - Shortest-path length computed with no transform \n'); - s5 = sprintf('SIbin - Search Information of binary shortest-paths \n'); - s6 = sprintf('SIwei_inv - Search Information of shortest-paths computed with an inv transform \n'); - s7 = sprintf('SIwei_log - Search Information of shortest-paths computed with a log transform \n'); - s8 = sprintf('SIdist - Search Information of shortest-paths computed with no transform \n'); - s9 = sprintf('T - Path Transitivity \n'); - s10 = sprintf('deltaMFPT - Column-wise z-scored mean first passage time \n'); - s11 = sprintf('neighOverlap - Neighborhood Overlap \n'); - s12 = sprintf('MI - Matching Index \n'); - error('%s \n %s %s %s %s %s %s %s %s %s %s %s %s',err_str,s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12); -end - -if flag_ED - [n1,n2] = size(ED); - if n1 == n2 && n1 == N - % square ED matrix - pred_data = ED(indx); - elseif n1 == length(indx) && n2 == 1 - % ED is already an upper-triangle vector - pred_data = ED; - else - error('ED must be square matrix or a vector containing the upper triangle of the square ED matrix \n') - end -else - pred_data = []; -end - - -if flag_var_names - fprintf('\n----------------------'); - fprintf('\n Selected predictors: \n'); - ind2start = size(pred_data,2); - pred_data = [pred_data,zeros(length(indx),length(pred_var))]; - - for v = 1:length(pred_var) - var_ind = find(strcmp(pred_var{v},pred_names)); - switch var_ind - - case 1 %SPLbin - fprintf('Shortest-path length (binary) \n\n'); - data = distance_wei_floyd(double(SC>0)); - case 2 %SPLwei_inv - fprintf('Shortest-path length computed with an inv transform \n'); - data = distance_wei_floyd(SC,'inv'); - case 3 %SPLwei_log - fprintf('Shortest-path length computed with a log transform \n'); - data = distance_wei_floyd(SC,'log'); - case 4 %SPLdist - fprintf('Shortest-path length computed with no transform \n'); - data = distance_wei_floyd(SC); - case 5 %SIbin - fprintf('Search Information of binary shortest-paths \n'); - data = search_information(double(SC>0)); - data = data + data'; - case 6 %SIwei_inv - fprintf('Search Information of shortest-paths computed with an inv transform \n'); - data = search_information(SC,'inv'); - data = data + data'; - case 7 %SIwei_log - fprintf('Search Information of 
shortest-paths computed with a log transform \n'); - data = search_information(SC,'log'); - data = data + data'; - case 8 %SIdist - fprintf('Search Information of shortest-paths computed with no transform \n'); - data = search_information(SC); - data = data + data'; - case 9 %T - fprintf('Path Transitivity \n'); - data = path_transitivity(double(SC>0)); - case 10 %deltaMFPT - fprintf('Column-wise z-scored mean first passage time \n'); - mfpt = mean_first_passage_time(SC); - deltamfpt = zscore(mfpt,[],1); - data = deltamfpt+deltamfpt'; - case 11 %neighOverlap - fprintf('Neighborhood Overlap \n'); - data = double(SC>0) * double(SC>0)'; - case 12 %MI - fprintf('Matching Index \n'); - data = matching_ind(SC); - otherwise - error('This is not an accepted predictor. See list of available predictors \n') - end - pred_data(:,ind2start+v) = data(indx); - end -else - if size(pred_var,1) == length(indx) - pred_data = [pred_data,pred_var]; - else - error('Custom predictors must be provided as KxM array of M predictors \n'); - end -end - -pred_data = x2fx(pred_data,model); - -if size(pred_data,2) == size(beta,1) - Y = pred_data*beta; - FCpre = zeros(N); - FCpre(indx) = Y; - FCpre = FCpre+FCpre'; -end - -if nargin == 6 && ~isempty(FC) - flag_nan_corr = false; - [n1,n2] = size(FC); - if n1 == n2 && n1 == N - % square FC matrix - FCemp = FC(indx); - elseif n1 == length(indx) && n2 == 1 - % FC is already an upper-triangle vector - FCemp = FC; - else - warning('FC must be square matrix or a vector containing the upper triangle (no diagonal elements) of the square FC matrix \n') - flag_nan_corr = true; - end - if ~flag_nan_corr - Fcorr = corr(Y,FCemp); - else - Fcorr = nan; - end -end diff --git a/DefaultData/2019_03_03_BCT/generative_model.m b/DefaultData/2019_03_03_BCT/generative_model.m deleted file mode 100755 index c5516fe..0000000 --- a/DefaultData/2019_03_03_BCT/generative_model.m +++ /dev/null @@ -1,897 +0,0 @@ -function b = generative_model(A,D,m,modeltype,modelvar,params,epsilon) -% GENERATIVE_MODEL Run generative model code -% -% B = GENERATIVE_MODEL(A,D,m,modeltype,modelvar,params) -% -% Generates synthetic networks using the models described in the study by -% Betzel et al (2016) in Neuroimage. -% -% Inputs: -% A, binary network of seed connections -% D, Euclidean distance/fiber length matrix -% m, number of connections that should be present in -% final synthetic network -% modeltype, specifies the generative rule (see below) -% modelvar, specifies whether the generative rules are based on -% power-law or exponential relationship -% ({'powerlaw'}|{'exponential}) -% params, either a vector (in the case of the geometric -% model) or a matrix (for all other models) of -% parameters at which the model should be evaluated. -% epsilon, the baseline probability of forming a particular -% connection (should be a very small number -% {default = 1e-5}). -% -% Output: -% B, m x number of networks matrix of connections -% -% -% Full list of model types: -% (each model type realizes a different generative rule) -% -% 1. 'sptl' spatial model -% 2. 'neighbors' number of common neighbors -% 3. 'matching' matching index -% 4. 'clu-avg' average clustering coeff. -% 5. 'clu-min' minimum clustering coeff. -% 6. 'clu-max' maximum clustering coeff. -% 7. 'clu-diff' difference in clustering coeff. -% 8. 'clu-prod' product of clustering coeff. -% 9. 'deg-avg' average degree -% 10. 'deg-min' minimum degree -% 11. 'deg-max' maximum degree -% 12. 'deg-diff' difference in degree -% 13. 
'deg-prod' product of degree -% -% -% Example usage: -% -% load demo_generative_models_data -% -% % get number of bi-directional connections -% m = nnz(A)/2; -% -% % get cardinality of network -% n = length(A); -% -% % set model type -% modeltype = 'neighbors'; -% -% % set whether the model is based on powerlaw or exponentials -% modelvar = [{'powerlaw'},{'powerlaw'}]; -% -% % choose some model parameters -% params = [-2,0.2; -5,1.2; -1,1.5]; -% nparams = size(params,1); -% -% % generate synthetic networks -% B = generative_model(Aseed,D,m,modeltype,modelvar,params); -% -% % store them in adjacency matrix format -% Asynth = zeros(n,n,nparams); -% for i = 1:nparams; -% a = zeros(n); a(B(:,i)) = 1; a = a + a'; -% Asynth(:,:,i) = a; -% end -% -% Reference: Betzel et al (2016) Neuroimage 124:1054-64. -% -% Richard Betzel, Indiana University/University of Pennsylvania, 2015 - -if ~exist('epsilon','var') - epsilon = 1e-5; -end - -n = length(D); -nparams = size(params,1); -b = zeros(m,nparams); - -switch modeltype - - case 'clu-avg' - clu = clustering_coef_bu(A); - Kseed = bsxfun(@plus,clu(:,ones(1,n)),clu')/2; - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_clu_avg(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'clu-diff' - clu = clustering_coef_bu(A); - Kseed = abs(bsxfun(@minus,clu(:,ones(1,n)),clu')); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_clu_diff(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'clu-max' - clu = clustering_coef_bu(A); - Kseed = bsxfun(@max,clu(:,ones(1,n)),clu'); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_clu_max(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'clu-min' - clu = clustering_coef_bu(A); - Kseed = bsxfun(@min,clu(:,ones(1,n)),clu'); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_clu_min(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'clu-prod' - clu = clustering_coef_bu(A); - Kseed = clu*clu'; - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_clu_prod(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'deg-avg' - kseed = sum(A,2); - Kseed = bsxfun(@plus,kseed(:,ones(1,n)),kseed')/2; - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_deg_avg(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'deg-diff' - kseed = sum(A,2); - Kseed = abs(bsxfun(@minus,kseed(:,ones(1,n)),kseed')); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_deg_diff(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'deg-max' - kseed = sum(A,2); - Kseed = bsxfun(@max,kseed(:,ones(1,n)),kseed'); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_deg_max(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'deg-min' - kseed = sum(A,2); - Kseed = bsxfun(@min,kseed(:,ones(1,n)),kseed'); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_deg_min(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'deg-prod' - kseed = sum(A,2); - Kseed = (kseed*kseed').*~eye(n); - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_deg_prod(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'neighbors' - Kseed = (A*A).*~eye(n); - for iparam = 1:nparams - eta = params(iparam,1); - gam = 
params(iparam,2); - b(:,iparam) = fcn_nghbrs(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'matching' - Kseed = matching_ind(A); - Kseed = Kseed + Kseed'; - for iparam = 1:nparams - eta = params(iparam,1); - gam = params(iparam,2); - b(:,iparam) = fcn_matching(A,Kseed,D,m,eta,gam,modelvar,epsilon); - end - - case 'sptl' - for iparam = 1:nparams - eta = params(iparam,1); - b(:,iparam) = fcn_sptl(A,D,m,eta,modelvar{1}); - end - -end - -function b = fcn_clu_avg(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -A = A > 0; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end - -c = clustering_coef_bu(A); -k = sum(A,2); - -Ff = Fd.*Fk.*~A; -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); - -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - A(uu,vv) = 1; - A(vv,uu) = 1; - k([uu,vv]) = k([uu,vv]) + 1; - bu = A(uu,:); - su = A(bu,bu); - bv = A(vv,:); - sv = A(bv,bv); - bth = bu & bv; - c(bth) = c(bth) + 2./(k(bth).^2 - k(bth)); - c(uu) = nnz(su)/(k(uu)*(k(uu) - 1)); - c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1)); - c(k <= 1) = 0; - bth([uu,vv]) = true; - K(:,bth) = bsxfun(@plus,c(:,ones(1,sum(bth))),c(bth,:)')/2 + epsilon; - K(bth,:) = bsxfun(@plus,c(:,ones(1,sum(bth))),c(bth,:)')'/2 + epsilon; - - switch mv2 - case 'powerlaw' - Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam); - Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam); - case 'exponential' - Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam); - Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam); - end - Ff = Ff.*~A; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = fcn_clu_diff(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -A = A > 0; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end - -c = clustering_coef_bu(A); -k = sum(A,2); - -Ff = Fd.*Fk.*~A; -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); - -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - A(uu,vv) = 1; - A(vv,uu) = 1; - k([uu,vv]) = k([uu,vv]) + 1; - bu = A(uu,:); - su = A(bu,bu); - bv = A(vv,:); - sv = A(bv,bv); - bth = bu & bv; - c(bth) = c(bth) + 2./(k(bth).^2 - k(bth)); - c(uu) = nnz(su)/(k(uu)*(k(uu) - 1)); - c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1)); - c(k <= 1) = 0; - bth([uu,vv]) = true; - K(:,bth) = abs(bsxfun(@minus,c(:,ones(1,sum(bth))),c(bth,:)')) + epsilon; - K(bth,:) = abs(bsxfun(@minus,c(:,ones(1,sum(bth))),c(bth,:)'))' + epsilon; - - switch mv2 - case 'powerlaw' - Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam); - Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam); - case 'exponential' - Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam); - Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam); - end - Ff = Ff.*~A; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = fcn_clu_max(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -A = A > 0; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end - -c = clustering_coef_bu(A); -k = sum(A,2); - -Ff = Fd.*Fk.*~A; -[u,v] = 
find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); - -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - A(uu,vv) = 1; - A(vv,uu) = 1; - k([uu,vv]) = k([uu,vv]) + 1; - bu = A(uu,:); - su = A(bu,bu); - bv = A(vv,:); - sv = A(bv,bv); - bth = bu & bv; - c(bth) = c(bth) + 2./(k(bth).^2 - k(bth)); - c(uu) = nnz(su)/(k(uu)*(k(uu) - 1)); - c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1)); - c(k <= 1) = 0; - bth([uu,vv]) = true; - K(:,bth) = bsxfun(@max,c(:,ones(1,sum(bth))),c(bth,:)') + epsilon; - K(bth,:) = bsxfun(@max,c(:,ones(1,sum(bth))),c(bth,:)')' + epsilon; - - switch mv2 - case 'powerlaw' - Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam); - Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam); - case 'exponential' - Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam); - Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam); - end - Ff = Ff.*~A; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = fcn_clu_min(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -A = A > 0; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end - -c = clustering_coef_bu(A); -k = sum(A,2); - -Ff = Fd.*Fk.*~A; -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); - -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - A(uu,vv) = 1; - A(vv,uu) = 1; - k([uu,vv]) = k([uu,vv]) + 1; - bu = A(uu,:); - su = A(bu,bu); - bv = A(vv,:); - sv = A(bv,bv); - bth = bu & bv; - c(bth) = c(bth) + 2./(k(bth).^2 - k(bth)); - c(uu) = nnz(su)/(k(uu)*(k(uu) - 1)); - c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1)); - c(k <= 1) = 0; - bth([uu,vv]) = true; - K(:,bth) = bsxfun(@min,c(:,ones(1,sum(bth))),c(bth,:)') + epsilon; - K(bth,:) = bsxfun(@min,c(:,ones(1,sum(bth))),c(bth,:)')' + epsilon; - - switch mv2 - case 'powerlaw' - Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam); - Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam); - case 'exponential' - Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam); - Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam); - end - Ff = Ff.*~A; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = fcn_clu_prod(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -A = A > 0; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end - -c = clustering_coef_bu(A); -k = sum(A,2); - -Ff = Fd.*Fk.*~A; -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); - -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - A(uu,vv) = 1; - A(vv,uu) = 1; - k([uu,vv]) = k([uu,vv]) + 1; - bu = A(uu,:); - su = A(bu,bu); - bv = A(vv,:); - sv = A(bv,bv); - bth = bu & bv; - c(bth) = c(bth) + 2./(k(bth).^2 - k(bth)); - c(uu) = nnz(su)/(k(uu)*(k(uu) - 1)); - c(vv) = nnz(sv)/(k(vv)*(k(vv) - 1)); - c(k <= 1) = 0; - bth([uu,vv]) = true; - K(bth,:) = (c(bth,:)*c') + epsilon; - K(:,bth) = (c*c(bth,:)') + epsilon; - - switch mv2 - case 'powerlaw' - Ff(bth,:) = Fd(bth,:).*((K(bth,:)).^gam); - Ff(:,bth) = Fd(:,bth).*((K(:,bth)).^gam); - case 'exponential' - Ff(bth,:) = Fd(bth,:).*exp((K(bth,:))*gam); - Ff(:,bth) = Fd(:,bth).*exp((K(:,bth))*gam); - end - Ff = Ff.*~A; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = 
fcn_deg_avg(A,K,D,m,eta,gam,modelvar,epsilon) -n = length(D); -mseed = nnz(A)/2; -k = sum(A,2); -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -D = D(indx); -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -K = K + epsilon; -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -P = Fd.*Fk(indx).*~A(indx); -b = zeros(m,1); -b(1:mseed) = find(A(indx)); -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - w = [u(r),v(r)]; - k(w) = k(w) + 1; - switch mv2 - case 'powerlaw' - Fk(:,w) = [((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon].^gam; - Fk(w,:) = ([((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon].^gam)'; - case 'exponential' - Fk(:,w) = exp([((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon]*gam); - Fk(w,:) = exp([((k + k(w(1)))/2) + epsilon, ((k + k(w(2)))/2) + epsilon]*gam)'; - end - P = Fd.*Fk(indx); - b(i) = r; - P(b(1:i)) = 0; -end -b = indx(b); - -function b = fcn_deg_diff(A,K,D,m,eta,gam,modelvar,epsilon) -n = length(D); -mseed = nnz(A)/2; -k = sum(A,2); -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -D = D(indx); -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -K = K + epsilon; -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -P = Fd.*Fk(indx).*~A(indx); -b = zeros(m,1); -b(1:mseed) = find(A(indx)); -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - - w = [u(r),v(r)]; - k(w) = k(w) + 1; - switch mv2 - case 'powerlaw' - Fk(:,w) = (abs([k - k(w(1)), k - k(w(2))]) + epsilon).^gam; - Fk(w,:) = ((abs([k - k(w(1)), k - k(w(2))]) + epsilon).^gam)'; - case 'exponential' - Fk(:,w) = exp((abs([k - k(w(1)), k - k(w(2))]) + epsilon)*gam); - Fk(w,:) = exp((abs([k - k(w(1)), k - k(w(2))]) + epsilon)*gam)'; - end - P = Fd.*Fk(indx); - b(i) = r; - P(b(1:i)) = 0; -end -b = indx(b); - -function b = fcn_deg_min(A,K,D,m,eta,gam,modelvar,epsilon) -n = length(D); -mseed = nnz(A)/2; -k = sum(A,2); -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -D = D(indx); -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -K = K + epsilon; -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -P = Fd.*Fk(indx).*~A(indx); -b = zeros(m,1); -b(1:mseed) = find(A(indx)); -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - w = [u(r),v(r)]; - k(w) = k(w) + 1; - switch mv2 - case 'powerlaw' - Fk(:,w) = [min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon].^gam; - Fk(w,:) = ([min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon].^gam)'; - case 'exponential' - Fk(:,w) = exp([min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon]*gam); - Fk(w,:) = exp([min(k,k(w(1))) + epsilon, min(k,k(w(2))) + epsilon]*gam)'; - end - P = Fd.*Fk(indx); - b(i) = r; - P(b(1:i)) = 0; -end -b = indx(b); - -function b = fcn_deg_max(A,K,D,m,eta,gam,modelvar,epsilon) -n = length(D); -mseed = nnz(A)/2; -k = sum(A,2); -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -D = D(indx); -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -K = K + epsilon; -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -P = Fd.*Fk(indx).*~A(indx); -b = zeros(m,1); -b(1:mseed) = find(A(indx)); 
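% [Editor's note: descriptive comment, not part of the deleted file.]
% The loop below, like every fcn_* subfunction in this file, grows the network
% one edge at a time with a cumulative-sum "roulette wheel":
%   C = [0; cumsum(P)];            % cumulative bins over candidate edges
%   r = sum(rand*C(end) >= C);     % index drawn with probability P(r)/sum(P)
% Candidate edge r is therefore selected in proportion to its current weight
% P(r); afterwards P(r) is zeroed (or the whole P is recomputed and masked by
% ~A) so the same edge cannot be drawn twice.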
-for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - w = [u(r),v(r)]; - k(w) = k(w) + 1; - switch mv2 - case 'powerlaw' - Fk(:,w) = [max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon].^gam; - Fk(w,:) = ([max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon].^gam)'; - case 'exponential' - Fk(:,w) = exp([max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon]*gam); - Fk(w,:) = exp([max(k,k(w(1))) + epsilon, max(k,k(w(2))) + epsilon]*gam)'; - end - P = Fd.*Fk(indx); - b(i) = r; - P(b(1:i)) = 0; -end -b = indx(b); - -function b = fcn_deg_prod(A,K,D,m,eta,gam,modelvar,epsilon) -n = length(D); -mseed = nnz(A)/2; -k = sum(A,2); -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -D = D(indx); -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -K = K + epsilon; -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -P = Fd.*Fk(indx).*~A(indx); -b = zeros(m,1); -b(1:mseed) = find(A(indx)); -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - w = [u(r),v(r)]; - k(w) = k(w) + 1; - switch mv2 - case 'powerlaw' - Fk(:,w) = ([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon].^gam); - Fk(w,:) = (([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon].^gam)'); - case 'exponential' - Fk(:,w) = exp([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon]*gam); - Fk(w,:) = exp([k*k(w(1)) + epsilon, k*k(w(2)) + epsilon]*gam)'; - end - P = Fd.*Fk(indx); - b(i) = r; - P(b(1:i)) = 0; -end -b = indx(b); - -function b = fcn_nghbrs(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -A = A > 0; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' -% gam = abs(gam); - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -Ff = Fd.*Fk.*~A; -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - x = A(uu,:); - y = A(:,vv); - A(uu,vv) = 1; - A(vv,uu) = 1; - K(uu,y) = K(uu,y) + 1; - K(y,uu) = K(y,uu) + 1; - K(vv,x) = K(vv,x) + 1; - K(x,vv) = K(x,vv) + 1; - switch mv2 - case 'powerlaw' - Ff(uu,y) = Fd(uu,y).*(K(uu,y).^gam); - Ff(y,uu) = Ff(uu,y)'; - Ff(vv,x) = Fd(vv,x).*(K(vv,x).^gam); - Ff(x,vv) = Ff(vv,x)'; - case 'exponential' - Ff(uu,y) = Fd(uu,y).*exp(K(uu,y)*gam); - Ff(y,uu) = Ff(uu,y)'; - Ff(vv,x) = Fd(vv,x).*exp(K(vv,x)*gam); - Ff(x,vv) = Ff(vv,x)'; - end - Ff(A) = 0; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = fcn_matching(A,K,D,m,eta,gam,modelvar,epsilon) -K = K + epsilon; -n = length(D); -mseed = nnz(A)/2; -mv1 = modelvar{1}; -mv2 = modelvar{2}; -switch mv1 - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); -end -Ff = Fd.*Fk.*~A; -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Ff(indx); -for ii = (mseed + 1):m - - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - uu = u(r); - vv = v(r); - - A(uu,vv) = 1; - A(vv,uu) = 1; - - updateuu = find(A*A(:,uu)); - updateuu(updateuu == uu) = []; - updateuu(updateuu == vv) = []; - - updatevv = find(A*A(:,vv)); - updatevv(updatevv == uu) = []; - updatevv(updatevv == vv) = []; - - c1 = [A(:,uu)', A(uu,:)]; - for i = 1:length(updateuu) - j = updateuu(i); - c2 = [A(:,j)' A(j,:)]; - use = ~(~c1&~c2); - use(uu) = 0; use(uu+n) = 0; - use(j) = 0; use(j+n) = 0; - 
ncon = sum(c1(use))+sum(c2(use)); - if (ncon==0) - K(uu,j) = epsilon; - K(j,uu) = epsilon; - else - K(uu,j) = (2*(sum(c1(use)&c2(use))/ncon)) + epsilon; - K(j,uu) = K(uu,j); - end - - end - - c1 = [A(:,vv)', A(vv,:)]; - for i = 1:length(updatevv) - j = updatevv(i); - c2 = [A(:,j)' A(j,:)]; - use = ~(~c1&~c2); - use(vv) = 0; use(vv+n) = 0; - use(j) = 0; use(j+n) = 0; - ncon = sum(c1(use))+sum(c2(use)); - if (ncon==0) - K(vv,j) = epsilon; - K(j,vv) = epsilon; - else - K(vv,j) = (2*(sum(c1(use)&c2(use))/ncon)) + epsilon; - K(j,vv) = K(vv,j); - end - end - switch mv2 - case 'powerlaw' - Fk = K.^gam; - case 'exponential' - Fk = exp(gam*K); - end - Ff = Fd.*Fk.*~A; - P = Ff(indx); -end -b = find(triu(A,1)); - -function b = fcn_sptl(A,D,m,eta,modelvar) -n = length(D); -mseed = nnz(A)/2; -switch modelvar - case 'powerlaw' - Fd = D.^eta; - case 'exponential' - Fd = exp(eta*D); -end -[u,v] = find(triu(ones(n),1)); -indx = (v - 1)*n + u; -P = Fd(indx).*~A(indx); -b = zeros(m,1); -b(1:mseed) = find(A(indx)); -for i = (mseed + 1):m - C = [0; cumsum(P)]; - r = sum(rand*C(end) >= C); - b(i) = r; - P = Fd(indx); - P(b(1:i)) = 0; -end -b = indx(b); diff --git a/DefaultData/2019_03_03_BCT/get_components.m b/DefaultData/2019_03_03_BCT/get_components.m deleted file mode 100755 index b05bbbd..0000000 --- a/DefaultData/2019_03_03_BCT/get_components.m +++ /dev/null @@ -1,57 +0,0 @@ -function [comps,comp_sizes] = get_components(adj) -% GET_COMPONENTS Connected components -% -% [comps,comp_sizes] = get_components(adj); -% -% Returns the components of an undirected graph specified by the binary and -% undirected adjacency matrix adj. Components and their constitutent nodes are -% assigned the same index and stored in the vector, comps. The vector, comp_sizes, -% contains the number of nodes beloning to each component. -% -% Inputs: adj, binary and undirected adjacency matrix -% -% Outputs: comps, vector of component assignments for each node -% comp_sizes, vector of component sizes -% -% Note: disconnected nodes will appear as components of size 1 -% -% J Goni, University of Navarra and Indiana University, 2009/2011 - -if size(adj,1)~=size(adj,2) - error('this adjacency matrix is not square'); -end - -if ~any(adj-triu(adj)) - adj = adj | adj'; -end - -%if main diagonal of adj do not contain all ones, i.e. autoloops -if sum(diag(adj))~=size(adj,1) - - %the main diagonal is set to ones - adj = adj|speye(size(adj)); -end - -%Dulmage-Mendelsohn decomposition -[~,p,~,r] = dmperm(adj); - -%p indicates a permutation (along rows and columns) -%r is a vector indicating the component boundaries - -% List including the number of nodes of each component. ith entry is r(i+1)-r(i) -comp_sizes = diff(r); - -% Number of components found. -num_comps = numel(comp_sizes); - -% initialization -comps = zeros(1,size(adj,1)); - -% first position of each component is set to one -comps(r(1:num_comps)) = ones(1,num_comps); - -% cumulative sum produces a label for each component (in a consecutive way) -comps = cumsum(comps); - -%re-order component labels according to adj. 
-comps(p) = comps; diff --git a/DefaultData/2019_03_03_BCT/grid_communities.m b/DefaultData/2019_03_03_BCT/grid_communities.m deleted file mode 100755 index 25b4590..0000000 --- a/DefaultData/2019_03_03_BCT/grid_communities.m +++ /dev/null @@ -1,46 +0,0 @@ -function [X,Y,indsort] = grid_communities(c) -% GRID_COMMUNITIES Outline communities along diagonal -% -% [X Y INDSORT] = GRID_COMMUNITIES(C) takes a vector of community -% assignments C and returns three output arguments for visualizing the -% communities. The third is INDSORT, which is an ordering of the vertices -% so that nodes with the same community assignment are next to one -% another. The first two arguments are vectors that, when overlaid on the -% adjacency matrix using the PLOT function, highlight the communities. -% -% Example: -% -% >> load AIJ; % load adjacency matrix -% >> [C,Q] = modularity_louvain_und(AIJ); % get community assignments -% >> [X,Y,INDSORT] = fcn_grid_communities(C); % call function -% >> imagesc(AIJ(INDSORT,INDSORT)); % plot ordered adjacency matrix -% >> hold on; % hold on to overlay community visualization -% >> plot(X,Y,'r','linewidth',2); % plot community boundaries -% -% Inputs: C, community assignments -% -% Outputs: X, x coor -% Y, y coor -% INDSORT, indices -% -% Richard Betzel, Indiana University, 2012 -% - -%#ok<*AGROW> - -nc = max(c); -[c,indsort] = sort(c); - -X = []; -Y = []; -for i = 1:nc - ind = find(c == i); - if ~isempty(ind) - mn = min(ind) - 0.5; - mx = max(ind) + 0.5; - x = [mn mn mx mx mn NaN]; - y = [mn mx mx mn mn NaN]; - X = [X, x]; - Y = [Y, y]; - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/gtom.m b/DefaultData/2019_03_03_BCT/gtom.m deleted file mode 100755 index 54dc653..0000000 --- a/DefaultData/2019_03_03_BCT/gtom.m +++ /dev/null @@ -1,81 +0,0 @@ -function gt = gtom(adj,numSteps) -%GTOM Generalized topological overlap measure -% -% gt = gtom(adj,numSteps); -% -% The m-th step generalized topological overlap measure (GTOM) quantifies -% the extent to which a pair of nodes have similar m-th step neighbors. -% Mth-step neighbors are nodes that are reachable by a path of at most -% length m. -% -% This function computes the the M x M generalized topological overlap -% measure (GTOM) matrix for number of steps, numSteps. -% -% Inputs: adj, adjacency matrix (binary,undirected) -% numSteps, number of steps -% -% Outputs: gt, GTOM matrix -% -% NOTE: When numSteps is equal to 1, GTOM is identical to the topological -% overlap measure (TOM) from reference [2]. In that case the 'gt' matrix -% records, for each pair of nodes, the fraction of neighbors the two -% nodes share in common, where "neighbors" are one step removed. As -% 'numSteps' is increased, neighbors that are furter out are considered. -% Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be -% converted from a similarity to a distance matrix by taking 1-gt. -% -% References: [1] Yip & Horvath (2007) BMC Bioinformatics 2007, 8:22 -% [2] Ravasz et al (2002) Science 297 (5586), 1551. -% -% J Goni, University of Navarra and Indiana University, 2009/2011 - -%#ok<*ASGLU> - -%initial state for bm matrix; -bm = adj; -bmAux = bm; -numNodes = size(adj,1); - -if (numSteps > numNodes) - disp('warning, reached maximum value for numSteps. 
numSteps reduced to adj-size') - numSteps = numNodes; -end - -if (numSteps == 0) - %GTOM0 - gt = adj; -else - - for steps = 2:numSteps - for i = 1:numNodes - - %neighbours of node i - [neighRow,neighColumn] = find(bm(i,:)==1); - - %neighbours of neighbours of node i - [neighNeighRow,neighNeighColumn] = find(bm(neighColumn,:)==1); - newNeigh = setdiff(unique(neighNeighColumn),i); - - %neighbours of neighbours of node i become considered node i neighbours - bmAux(i,newNeigh) = 1; - - %keep symmetry of matrix - bmAux(newNeigh,i) = 1; - end - %bm is updated with new step all at once - bm = bmAux; - - end - - clear bmAux newNeigh; - - %numerators of GTOM formula - numeratorMatrix = bm*bm + adj + speye(numNodes,numNodes); - - %vector containing degree of each node - bmSum=sum(bm); - clear bm; - - denominatorMatrix = -adj + min(repmat(bmSum,numNodes,1),repmat(bmSum',1,numNodes)) + 1; - gt = numeratorMatrix ./ denominatorMatrix; -end diff --git a/DefaultData/2019_03_03_BCT/jdegree.m b/DefaultData/2019_03_03_BCT/jdegree.m deleted file mode 100755 index fa262ae..0000000 --- a/DefaultData/2019_03_03_BCT/jdegree.m +++ /dev/null @@ -1,48 +0,0 @@ -function [J,J_od,J_id,J_bl] = jdegree(CIJ) -%JDEGREE Joint degree distribution -% -% [J,J_od,J_id,J_bl] = jdegree(CIJ); -% -% This function returns a matrix in which the value of each element (u,v) -% corresponds to the number of nodes that have u outgoing connections -% and v incoming connections. -% -% Input: CIJ, directed (weighted/binary) connection matrix -% -% Outputs: J, joint degree distribution matrix (shifted by one) -% J_od, number of vertices with od>id. -% J_id, number of vertices with id>od. -% J_bl, number of vertices with id=od. -% -% Note: Weights are discarded. -% -% -% Olaf Sporns, Indiana University, 2002/2006/2008 - - -% ensure CIJ is binary... -CIJ = double(CIJ~=0); - -N = size(CIJ,1); - -id = sum(CIJ,1); % indegree = column sum of CIJ -od = sum(CIJ,2)'; % outdegree = row sum of CIJ - -% Create the joint degree distribution matrix -% Note: the matrix is shifted by one, to accomodate zero id and od in the first row/column. -% Upper triangular part of the matrix has vertices with an excess of -% outgoing edges (od>id) -% Lower triangular part of the matrix has vertices with an excess of -% outgoing edges (id>od) -% Main diagonal has units with id=od - -szJ = max(max(id,od))+1; -J = zeros(szJ); - -for i=1:N - J(id(i)+1,od(i)+1) = J(id(i)+1,od(i)+1) + 1; -end; - -J_od = sum(sum(triu(J,1))); -J_id = sum(sum(tril(J,-1))); -J_bl = sum(diag(J)); diff --git a/DefaultData/2019_03_03_BCT/kcore_bd.m b/DefaultData/2019_03_03_BCT/kcore_bd.m deleted file mode 100755 index 44a4b3f..0000000 --- a/DefaultData/2019_03_03_BCT/kcore_bd.m +++ /dev/null @@ -1,59 +0,0 @@ -function [CIJkcore,kn,peelorder,peellevel] = kcore_bd(CIJ,k) -%KCORE_BD K-core -% -% [CIJkcore,kn,peelorder,peellevel] = kcore_bd(CIJ,k); -% -% The k-core is the largest subnetwork comprising nodes of degree at -% least k. This function computes the k-core for a given binary directed -% connection matrix by recursively peeling off nodes with degree lower -% than k, until no such nodes remain. -% -% input: CIJ, connection/adjacency matrix (binary, directed) -% k, level of k-core -% -% output: CIJkcore, connection matrix of the k-core. This matrix -% only contains nodes of degree at least k. 
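% --- Editor's illustrative sketch (not part of the deleted files; assumes the
% --- BCT functions in this folder are on the MATLAB path) ----------------------
% gtom above returns an m-step topological-overlap similarity in [0,1]; with
% numSteps = 1 it reduces to the classic TOM, and 1-gt gives a distance matrix
% (both points are taken from the gtom help text). Toy inputs are assumptions.
adj = makerandCIJ_und(50, 200);   % assumed toy input: binary undirected graph
gt  = gtom(adj, 1);               % 1-step GTOM (= TOM)
d   = 1 - gt;                     % distance form, e.g. for linkage/clustering
% --------------------------------------------------------------------------------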
-% kn, size of k-core -% peelorder, indices in the order in which they were -% peeled away during k-core decomposition -% peellevel, corresponding level - nodes at the same -% level have been peeled away at the same time -% -% 'peelorder' and 'peellevel' are similar the the k-core sub-shells -% described in Modha and Singh (2010). -% -% References: e.g. Hagmann et al. (2008) PLoS Biology -% -% Olaf Sporns, Indiana University, 2007/2008/2010/2012 - -%#ok<*ASGLU> -%#ok<*AGROW> - -peelorder = []; -peellevel = []; -iter = 0; - -while 1 - - % get degrees of matrix - [id,od,deg] = degrees_dir(CIJ); - - % find nodes with degree 0)); - - % if none found -> stop - if (isempty(ff)) break; end; %#ok - - % peel away found nodes - iter = iter+1; - CIJ(ff,:) = 0; - CIJ(:,ff) = 0; - - peelorder = [peelorder; ff']; - peellevel = [peellevel; iter.*ones(1,length(ff))']; - -end; - -CIJkcore = CIJ; -kn = sum(deg>0); - diff --git a/DefaultData/2019_03_03_BCT/kcore_bu.m b/DefaultData/2019_03_03_BCT/kcore_bu.m deleted file mode 100755 index b0bd23b..0000000 --- a/DefaultData/2019_03_03_BCT/kcore_bu.m +++ /dev/null @@ -1,58 +0,0 @@ -function [CIJkcore,kn,peelorder,peellevel] = kcore_bu(CIJ,k) -%KCORE_BU K-core -% -% [CIJkcore,kn,peelorder,peellevel] = kcore_bu(CIJ,k); -% -% The k-core is the largest subnetwork comprising nodes of degree at -% least k. This function computes the k-core for a given binary -% undirected connection matrix by recursively peeling off nodes with -% degree lower than k, until no such nodes remain. -% -% input: CIJ, connection/adjacency matrix (binary, undirected) -% k, level of k-core -% -% output: CIJkcore, connection matrix of the k-core. This matrix -% only contains nodes of degree at least k. -% kn, size of k-core -% peelorder, indices in the order in which they were -% peeled away during k-core decomposition -% peellevel, corresponding level - nodes at the same -% level were peeled away at the same time -% -% 'peelorder' and 'peellevel' are similar the the k-core sub-shells -% described in Modha and Singh (2010). -% -% References: e.g. Hagmann et al. (2008) PLoS Biology -% -% Olaf Sporns, Indiana University, 2007/2008/2010/2012 - -%#ok<*AGROW> - -peelorder = []; -peellevel = []; -iter = 0; - -while 1 - - % get degrees of matrix - [deg] = degrees_und(CIJ); - - % find nodes with degree 0)); - - % if none found -> stop - if (isempty(ff)) break; end; %#ok - - % peel away found nodes - iter = iter+1; - CIJ(ff,:) = 0; - CIJ(:,ff) = 0; - - peelorder = [peelorder; ff']; - peellevel = [peellevel; iter.*ones(1,length(ff))']; - -end; - -CIJkcore = CIJ; -kn = sum(deg>0); - diff --git a/DefaultData/2019_03_03_BCT/kcoreness_centrality_bd.m b/DefaultData/2019_03_03_BCT/kcoreness_centrality_bd.m deleted file mode 100755 index b59ad5e..0000000 --- a/DefaultData/2019_03_03_BCT/kcoreness_centrality_bd.m +++ /dev/null @@ -1,27 +0,0 @@ -function [coreness,kn] = kcoreness_centrality_bd(CIJ) -%KCORENESS_CENTRALITY_BD K-coreness centrality -% -% [coreness,kn] = kcoreness_centrality_bd(CIJ) -% -% The k-core is the largest subgraph comprising nodes of degree at least -% k. The coreness of a node is k if the node belongs to the k-core but -% not to the (k+1)-core. This function computes k-coreness of all nodes -% for a given binary directed connection matrix. -% -% input: CIJ, connection/adjacency matrix (binary, directed) -% -% output: coreness, node coreness. -% kn, size of k-core -% -% References: e.g. Hagmann et al. 
(2008) PLoS Biology -% -% Olaf Sporns, Indiana University, 2007/2008/2010/2012 - -N = size(CIJ,1); - -coreness = zeros(1,N); kn = zeros(1,N); -for k=1:N - [CIJkcore,kn(k)] = kcore_bd(CIJ,k); - ss = sum(CIJkcore)>0; - coreness(ss) = k; -end; diff --git a/DefaultData/2019_03_03_BCT/kcoreness_centrality_bu.m b/DefaultData/2019_03_03_BCT/kcoreness_centrality_bu.m deleted file mode 100755 index 0db57a7..0000000 --- a/DefaultData/2019_03_03_BCT/kcoreness_centrality_bu.m +++ /dev/null @@ -1,34 +0,0 @@ -function [coreness,kn] = kcoreness_centrality_bu(CIJ) -%KCORENESS_CENTRALITY_BU K-coreness centrality -% -% [coreness,kn] = kcoreness_centrality_bu(CIJ) -% -% The k-core is the largest subgraph comprising nodes of degree at least -% k. The coreness of a node is k if the node belongs to the k-core but -% not to the (k+1)-core. This function computes the coreness of all nodes -% for a given binary undirected connection matrix. -% -% input: CIJ, connection/adjacency matrix (binary, undirected) -% -% output: coreness, node coreness. -% kn, size of k-core -% -% References: e.g. Hagmann et al. (2008) PLoS Biology -% -% Olaf Sporns, Indiana University, 2007/2008/2010/2012 - -N = size(CIJ,1); - -% determine if the network is undirected - if not, compute coreness on the -% corresponding undirected network -CIJund = CIJ+CIJ'; -if (any(CIJund(:)>1)) - CIJ = double(CIJund>0); -end; - -coreness = zeros(1,N); kn = zeros(1,N); -for k=1:N - [CIJkcore,kn(k)] = kcore_bu(CIJ,k); - ss = sum(CIJkcore)>0; - coreness(ss) = k; -end; diff --git a/DefaultData/2019_03_03_BCT/latmio_dir.m b/DefaultData/2019_03_03_BCT/latmio_dir.m deleted file mode 100755 index 21dc2c2..0000000 --- a/DefaultData/2019_03_03_BCT/latmio_dir.m +++ /dev/null @@ -1,91 +0,0 @@ -function [Rlatt,Rrp,ind_rp,eff] = latmio_dir(R,ITER,D) -%LATMIO_DIR Lattice with preserved in/out degree distribution -% -% [Rlatt,Rrp,ind_rp,eff] = latmio_dir(R,ITER,D); -% -% This function "latticizes" a directed network, while preserving the in- -% and out-degree distributions. In weighted networks, the function -% preserves the out-strength but not the in-strength distributions. 
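% --- Editor's illustrative sketch (not part of the deleted files; assumes the
% --- BCT functions in this folder are on the MATLAB path) ----------------------
% k-core decomposition of a binary undirected toy graph, using the kcore_bu and
% kcoreness_centrality_bu routines above (signatures as in their help text).
CIJ = makerandCIJ_und(60, 300);                     % assumed toy input
[CIJkcore, kn] = kcore_bu(CIJ, 3);                  % 3-core: nodes of degree >= 3 after peeling
[coreness, knall] = kcoreness_centrality_bu(CIJ);   % per-node coreness
% --------------------------------------------------------------------------------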
-% -% Input: R, directed (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% D, distance-to-diagonal matrix -% -% Output: Rlatt, latticized network in original node ordering -% Rrp, latticized network in node ordering used for -% latticization -% ind_rp, node ordering used for latticization -% eff, number of actual rewirings carried out -% -% References: Maslov and Sneppen (2002) Science 296:910 -% Sporns and Zwi (2004) Neuroinformatics 2:145 -% -% Mika Rubinov, UNSW, 2007-2010 -% Olaf Sporns, IU, 2012 - -n=size(R,1); - -% randomly reorder matrix -ind_rp = randperm(n); -R = R(ind_rp,ind_rp); - -% create 'distance to diagonal' matrix -if nargin<3 %if D is not specified by user - D=zeros(n); - u=[0 min([mod(1:n-1,n);mod(n-1:-1:1,n)])]; - for v=1:ceil(n/2) - D(n-v+1,:)=u([v+1:n 1:v]); - D(v,:)=D(n-v+1,n:-1:1); - end -end -%end create - -[i,j]=find(R); -K=length(i); -ITER=K*ITER; - -% maximal number of rewiring attempts per 'iter' -maxAttempts= round(n*K/(n*(n-1))); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - while 1 - e1=ceil(K*rand); - e2=ceil(K*rand); - while (e2==e1) - e2=ceil(K*rand); - end - a=i(e1); b=j(e1); - c=i(e2); d=j(e2); - - if all(a~=[c d]) && all(b~=[c d]) - break %all four vertices must be different - end - end - - %rewiring condition - if ~(R(a,d) || R(c,b)) - %lattice condition - if (D(a,b)*R(a,b)+D(c,d)*R(c,d))>=(D(a,d)*R(a,b)+D(c,b)*R(c,d)) - R(a,d)=R(a,b); R(a,b)=0; - R(c,b)=R(c,d); R(c,d)=0; - - j(e1) = d; %reassign edge indices - j(e2) = b; - eff = eff+1; - break; - end %lattice condition - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations - -% lattice in node order used for latticization -Rrp = R; -% reverse random permutation of nodes -[~,ind_rp_reverse] = sort(ind_rp); -Rlatt = Rrp(ind_rp_reverse,ind_rp_reverse); \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/latmio_dir_connected.m b/DefaultData/2019_03_03_BCT/latmio_dir_connected.m deleted file mode 100755 index f15a599..0000000 --- a/DefaultData/2019_03_03_BCT/latmio_dir_connected.m +++ /dev/null @@ -1,119 +0,0 @@ -function [Rlatt,Rrp,ind_rp,eff] = latmio_dir_connected(R,ITER,D) -%LATMIO_DIR_CONNECTED Lattice with preserved in/out degree distribution -% -% [Rlatt,Rrp,ind_rp,eff] = latmio_dir_connected(R,ITER,D); -% -% This function "latticizes" a directed network, while preserving the in- -% and out-degree distributions. In weighted networks, the function -% preserves the out-strength but not the in-strength distributions. The -% function also ensures that the randomized network maintains -% connectedness, the ability for every node to reach every other node in -% the network. The input network for this function must be connected. 
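% --- Editor's illustrative sketch (not part of the deleted files) ---------------
% Directed latticization with latmio_dir (above): in-/out-degree sequences are
% preserved and, when D is omitted, the distance-to-diagonal matrix is built
% internally. Toy inputs are assumptions.
R = makerandCIJ_dir(60, 400);            % assumed toy input: binary directed graph
ITER = 5;                                % each edge rewired ~5 times (see help text)
[Rlatt, Rrp, ind_rp, eff] = latmio_dir(R, ITER);
% eff reports how many rewirings actually succeeded.
% --------------------------------------------------------------------------------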
-% -% Input: R, directed (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% D, distance-to-diagonal matrix -% -% Output: Rlatt, latticized network in original node ordering -% Rrp, latticized network in node ordering used for -% latticization -% ind_rp, node ordering used for latticization -% eff, number of actual rewirings carried out -% -% References: Maslov and Sneppen (2002) Science 296:910 -% Sporns and Zwi (2004) Neuroinformatics 2:145 -% -% Mika Rubinov, UNSW, 2007-2010 -% Olaf Sporns, Indiana University, 2012 - -n=size(R,1); - -% randomly reorder matrix -ind_rp = randperm(n); -R = R(ind_rp,ind_rp); - -% create 'distance to diagonal' matrix -if nargin<3 %if D is not specified by user - D=zeros(n); - u=[0 min([mod(1:n-1,n);mod(n-1:-1:1,n)])]; - for v=1:ceil(n/2) - D(n-v+1,:)=u([v+1:n 1:v]); - D(v,:)=D(n-v+1,n:-1:1); - end -end -%end create - -[i,j]=find(R); -K=length(i); -ITER=K*ITER; - -% maximal number of rewiring attempts per 'iter' -maxAttempts= round(n*K/(n*(n-1))); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - rewire=1; - while 1 - e1=ceil(K*rand); - e2=ceil(K*rand); - while (e2==e1) - e2=ceil(K*rand); - end - a=i(e1); b=j(e1); - c=i(e2); d=j(e2); - - if all(a~=[c d]) && all(b~=[c d]) - break %all four vertices must be different - end - end - - %rewiring condition - if ~(R(a,d) || R(c,b)) - %lattice condition - if (D(a,b)*R(a,b)+D(c,d)*R(c,d))>=(D(a,d)*R(a,b)+D(c,b)*R(c,d)) - %connectedness condition - if ~(any([R(a,c) R(d,b) R(d,c)]) && any([R(c,a) R(b,d) R(b,a)])) - P=R([a c],:); - P(1,b)=0; P(1,d)=1; - P(2,d)=0; P(2,b)=1; - PN=P; - PN(1,a)=1; PN(2,c)=1; - - while 1 - P(1,:)=any(R(P(1,:)~=0,:),1); - P(2,:)=any(R(P(2,:)~=0,:),1); - P=P.*(~PN); - PN=PN+P; - if ~all(any(P,2)) - rewire=0; - break - elseif any(PN(1,[b c])) && any(PN(2,[d a])) - break - end - end - end %connectedness testing - - if rewire %reassign edges - R(a,d)=R(a,b); R(a,b)=0; - R(c,b)=R(c,d); R(c,d)=0; - - j(e1) = d; %reassign edge indices - j(e2) = b; - eff = eff+1; - break; - end %edge reassignment - end %lattice condition - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations - -% lattice in node order used for latticization -Rrp = R; -% reverse random permutation of nodes -[~,ind_rp_reverse] = sort(ind_rp); -Rlatt = Rrp(ind_rp_reverse,ind_rp_reverse); \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/latmio_und.m b/DefaultData/2019_03_03_BCT/latmio_und.m deleted file mode 100755 index 30f3804..0000000 --- a/DefaultData/2019_03_03_BCT/latmio_und.m +++ /dev/null @@ -1,109 +0,0 @@ -function [Rlatt,Rrp,ind_rp,eff] = latmio_und(R,ITER,D) -%LATMIO_UND Lattice with preserved degree distribution -% -% [Rlatt,Rrp,ind_rp,eff] = latmio_und(R,ITER,D); -% -% This function "latticizes" an undirected network, while preserving the -% degree distribution. The function does not preserve the strength -% distribution in weighted networks. 
-% -% Input: R, undirected (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% D, distance-to-diagonal matrix -% -% Output: Rlatt, latticized network in original node ordering -% Rrp, latticized network in node ordering used for -% latticization -% ind_rp, node ordering used for latticization -% eff, number of actual rewirings carried out -% -% References: Maslov and Sneppen (2002) Science 296:910 -% Sporns and Zwi (2004) Neuroinformatics 2:145 -% -% 2007-2012 -% Mika Rubinov, UNSW -% Jonathan Power, WUSTL -% Olaf Sporns, IU - -% Modification History: -% Jun 2007: Original (Mika Rubinov) -% Apr 2008: Edge c-d is flipped with 50% probability, allowing to explore -% all potential rewirings (Jonathan Power) -% Feb 2012: limit on number of attempts, distance-to-diagonal as input, -% count number of successful rewirings (Olaf Sporns) -% Feb 2012: permute node ordering on each run, to ensure lattices are -% shuffled across mutliple runs (Olaf Sporns) - -n=size(R,1); - -% randomly reorder matrix -ind_rp = randperm(n); -R = R(ind_rp,ind_rp); - -% create 'distance to diagonal' matrix -if nargin<3 %if D is not specified by user - D=zeros(n); - u=[0 min([mod(1:n-1,n);mod(n-1:-1:1,n)])]; - for v=1:ceil(n/2) - D(n-v+1,:)=u([v+1:n 1:v]); - D(v,:)=D(n-v+1,n:-1:1); - end -end -%end create - -[i,j]=find(tril(R)); -K=length(i); -ITER=K*ITER; - -% maximal number of rewiring attempts per 'iter' -maxAttempts= round(n*K/(n*(n-1)/2)); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - while 1 - e1=ceil(K*rand); - e2=ceil(K*rand); - while (e2==e1) - e2=ceil(K*rand); - end - a=i(e1); b=j(e1); - c=i(e2); d=j(e2); - - if all(a~=[c d]) && all(b~=[c d]) - break %all four vertices must be different - end - end - - if rand>0.5 - i(e2)=d; j(e2)=c; %flip edge c-d with 50% probability - c=i(e2); d=j(e2); %to explore all potential rewirings - end - - %rewiring condition - if ~(R(a,d) || R(c,b)) - %lattice condition - if (D(a,b)*R(a,b)+D(c,d)*R(c,d))>=(D(a,d)*R(a,b)+D(c,b)*R(c,d)) - R(a,d)=R(a,b); R(a,b)=0; - R(d,a)=R(b,a); R(b,a)=0; - R(c,b)=R(c,d); R(c,d)=0; - R(b,c)=R(d,c); R(d,c)=0; - - j(e1) = d; %reassign edge indices - j(e2) = b; - eff = eff+1; - break; - end %lattice condition - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations - -% lattice in node order used for latticization -Rrp = R; -% reverse random permutation of nodes -[~,ind_rp_reverse] = sort(ind_rp); -Rlatt = Rrp(ind_rp_reverse,ind_rp_reverse); diff --git a/DefaultData/2019_03_03_BCT/latmio_und_connected.m b/DefaultData/2019_03_03_BCT/latmio_und_connected.m deleted file mode 100755 index 88e7220..0000000 --- a/DefaultData/2019_03_03_BCT/latmio_und_connected.m +++ /dev/null @@ -1,136 +0,0 @@ -function [Rlatt,Rrp,ind_rp,eff] = latmio_und_connected(R,ITER,D) -%LATMIO_UND_CONNECTED Lattice with preserved degree distribution -% -% [Rlatt,Rrp,ind_rp,eff] = latmio_und_connected(R,ITER,D); -% -% This function "latticizes" an undirected network, while preserving the -% degree distribution. The function does not preserve the strength -% distribution in weighted networks. The function also ensures that the -% randomized network maintains connectedness, the ability for every node -% to reach every other node in the network. The input network for this -% function must be connected. 
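% --- Editor's illustrative sketch (not part of the deleted files) ---------------
% Undirected counterpart: latmio_und (above) is a common way to obtain a lattice
% null model, e.g. as a reference network when computing small-world indices.
R = makerandCIJ_und(60, 300);            % assumed toy input: binary undirected graph
[Rlatt, ~, ~, eff] = latmio_und(R, 5);   % D again defaults to distance-to-diagonal
% --------------------------------------------------------------------------------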
-% -% Input: R, undirected (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% D, distance-to-diagonal matrix -% -% Output: Rlatt, latticized network in original node ordering -% Rrp, latticized network in node ordering used for -% latticization -% ind_rp, node ordering used for latticization -% eff, number of actual rewirings carried out -% -% References: Maslov and Sneppen (2002) Science 296:910 -% Sporns and Zwi (2004) Neuroinformatics 2:145 -% -% 2007-2012 -% Mika Rubinov, UNSW -% Jonathan Power, WUSTL -% Olaf Sporns, IU - -% Modification History: -% Jun 2007: Original (Mika Rubinov) -% Apr 2008: Edge c-d is flipped with 50% probability, allowing to explore -% all potential rewirings (Jonathan Power) -% Feb 2012: limit on number of attempts, distance-to-diagonal as input, -% count number of successful rewirings (Olaf Sporns) -% Feb 2012: permute node ordering on each run, to ensure lattices are -% shuffled across mutliple runs (Olaf Sporns) - -n=size(R,1); - -% randomly reorder matrix -ind_rp = randperm(n); -R = R(ind_rp,ind_rp); - -% create 'distance to diagonal' matrix -if nargin<3 %if D is not specified by user - D=zeros(n); - u=[0 min([mod(1:n-1,n);mod(n-1:-1:1,n)])]; - for v=1:ceil(n/2) - D(n-v+1,:)=u([v+1:n 1:v]); - D(v,:)=D(n-v+1,n:-1:1); - end -end -%end create - -[i,j]=find(tril(R)); -K=length(i); -ITER=K*ITER; - -% maximal number of rewiring attempts per 'iter' -maxAttempts= round(n*K/(n*(n-1)/2)); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - rewire=1; - while 1 - e1=ceil(K*rand); - e2=ceil(K*rand); - while (e2==e1) - e2=ceil(K*rand); - end - a=i(e1); b=j(e1); - c=i(e2); d=j(e2); - - if all(a~=[c d]) && all(b~=[c d]) - break %all four vertices must be different - end - end - - if rand>0.5 - i(e2)=d; j(e2)=c; %flip edge c-d with 50% probability - c=i(e2); d=j(e2); %to explore all potential rewirings - end - - %rewiring condition - if ~(R(a,d) || R(c,b)) - %lattice condition - if (D(a,b)*R(a,b)+D(c,d)*R(c,d))>=(D(a,d)*R(a,b)+D(c,b)*R(c,d)) - %connectedness condition - if ~(R(a,c) || R(b,d)) - P=R([a d],:); - P(1,b)=0; P(2,c)=0; - PN=P; - PN(:,d)=1; PN(:,a)=1; - - while 1 - P(1,:)=any(R(P(1,:)~=0,:),1); - P(2,:)=any(R(P(2,:)~=0,:),1); - P=P.*(~PN); - if ~all(any(P,2)) - rewire=0; - break - elseif any(any(P(:,[b c]))) - break - end - PN=PN+P; - end - end %connectedness testing - - if rewire %reassign edges - R(a,d)=R(a,b); R(a,b)=0; - R(d,a)=R(b,a); R(b,a)=0; - R(c,b)=R(c,d); R(c,d)=0; - R(b,c)=R(d,c); R(d,c)=0; - - j(e1) = d; %reassign edge indices - j(e2) = b; - eff = eff+1; - break; - end %edge reassignment - end %lattice condition - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations - -% lattice in node order used for latticization -Rrp = R; -% reverse random permutation of nodes -[~,ind_rp_reverse] = sort(ind_rp); -Rlatt = Rrp(ind_rp_reverse,ind_rp_reverse); diff --git a/DefaultData/2019_03_03_BCT/link_communities.m b/DefaultData/2019_03_03_BCT/link_communities.m deleted file mode 100755 index 46c8063..0000000 --- a/DefaultData/2019_03_03_BCT/link_communities.m +++ /dev/null @@ -1,150 +0,0 @@ -function M=link_communities(W,type_clustering) -%LINK_COMMUNITIES Optimal overlapping community structure -% -% M = link_communities(W) -% M = link_communities(W,'complete'); -% -% The optimal community structure is a subdivision of the network into -% groups of nodes which have a high number of within-group 
connections -% and a low number of between group connections. -% -% This algorithm uncovers overlapping community structure via -% hierarchical clustering of network links. This algorith is generalized -% for weighted/directed/fully-connected networks. -% -% Input: W, directed (weighted or binary) connection matrix. -% type_clustering, type of hierarchical clustering (optional) -% 'single' single-linkage (default) -% 'complete' complete-linkage -% -% Output: M, nodal community-affiliation matrix -% binary matrix of size CxN [communities x nodes] -% -% NB: The algorithm can be slow and memory intensive. -% -% Reference: Ahn, Bagrow and Lehmann (2010) Nature 466, 761–764. -% -% Mika Rubinov, U Cambridge, 2014-2015 - -%% initialize - -n=size(W,1); % number of nodes -W(1:n+1:end)=0; -W=W./max(W(:)); % normalize weights - -if ~exist('type_clustering','var') - type_clustering='single'; -end - -%% get node similarity - -W(1:n+1:end) = ( sum(W)/sum(W~=0) + sum(W.')/sum(W.'~=0) )/2; % mean weight on diagonal -No=sum(W.^2,2); % out-norm squared -Ni=sum(W.^2,1); % in-norm squared - -Jo=zeros(n); % weighted in-Jaccard -Ji=zeros(n); % weighted ou-Jaccard -for b=1:n - for c=1:n - Do=W(b,:)*W(c,:).'; - Jo(b,c)=Do./(No(b)+No(c)-Do); - - Di=W(:,b).'*W(:,c); - Ji(b,c)=Di./(Ni(b)+Ni(c)-Di); - end -end - -%% get link similarity - -[A,B]=find( (W|W.') & triu(ones(n),1)); -m=length(A); -Ln=zeros(m,2); % link nodes -Lw=zeros(m,1); % link weights -for i=1:m - Ln(i,:) = [A(i) B(i)]; % link nodes - Lw(i) = (W(A(i),B(i))+W(B(i),A(i)))/2; % link weight -end - -ES=zeros(m,m,'single'); % link similarity -for i=1:m - for j=1:m - if Ln(i,1)==Ln(j,1); a=Ln(i,1); b=Ln(i,2); c=Ln(j,2); - elseif Ln(i,1)==Ln(j,2); a=Ln(i,1); b=Ln(i,2); c=Ln(j,1); - elseif Ln(i,2)==Ln(j,1); a=Ln(i,2); b=Ln(i,1); c=Ln(j,2); - elseif Ln(i,2)==Ln(j,2); a=Ln(i,2); b=Ln(i,1); c=Ln(j,1); - else continue - end - - ES(i,j) = (W(a,b)*W(a,c)*Ji(b,c) + W(b,a)*W(c,a)*Jo(b,c))/2; - end -end -ES(1:m+1:end)=0; - -%% perform hierarchical clustering - -C=zeros(m,m,'single'); % community affiliation matrix -Nc=C; Mc=C; Dc=C; % communities nodes, links and density -U=1:m; % initial community assignments -C(1,:)=U; % as above, in the matrix - -for i=1:m-1; fprintf('hierarchy%8d\n',i) % hierarchy level - - % compute densities - for j=1:length(U) % loop over communities - idx = C(i,:)==U(j); % get link indices - links = sort(Lw(idx)); % sort link weights - nodes = sort(reshape(Ln(idx,:),2*nnz(idx),1)); - nodes = nodes([true;nodes(2:end)~=nodes(1:end-1)]); % get unique nodes - - nc = numel(nodes); % community nodes - mc = sum(links); % community weights - min_mc = sum(links(1:nc-1)); % minimal weight - dc = (mc - min_mc) / (nc.*(nc-1)/2 - min_mc); % community density - - Nc(i,j)=nc; - Mc(i,j)=mc; - Dc(i,j)=dc; - end - - % cluster - C(i+1,:)=C(i,:); % copy current partition - [u1,u2]=find(ES(U,U)==max(max(ES(U,U)))); % on this line MAXs MUST BE MAXs - - V=U(unique(sortrows(sort([u1 u2],2)),'rows')); % get unique links - for j=1:size(V,1) - switch type_clustering - case 'single'; x = max(ES(V(j,:),:),[],1); % max -> single linkage - case 'complete'; x = min(ES(V(j,:),:),[],1); % min -> complete linkage - otherwise; error('Unknown clustering type.'); - end - ES(V(j,:),:) = [x;x]; % assign distances to whole clusters - ES(:,V(j,:)) = [x;x].'; - ES(V(j,1),V(j,1)) = 0; % clear diagonal - ES(V(j,2),V(j,2)) = 0; % clear diagonal - - C(i+1,C(i+1,:)==V(j,2)) = V(j,1); % merge communities - V(V==V(j,2)) = V(j,1); % merge indices - end - - U=unique(C(i+1,:)); % get unique 
communities - if numel(U)==1 - break; - end -end - -%% - -Dc(isnan(Dc))=0; -[~,i]=max(sum(Dc.*Mc,2)); % get maximal density - -U=unique(C(i,:)); % unique communities -M=zeros(1,n); % nodal affiliations -for j=1:length(U) - M(j,unique( Ln(C(i,:)==U(j),:)) )=1; -end -M=M(sum(M,2)>2,:); - -% M2=zeros(n); % two dimensional nodal affiliation -% for i=1:size(M,1); -% M2=M2+(M(i,:).'*ones(1,n) & ones(n,1)*M(i,:)); -% end diff --git a/DefaultData/2019_03_03_BCT/local_assortativity_wu_sign.m b/DefaultData/2019_03_03_BCT/local_assortativity_wu_sign.m deleted file mode 100755 index 0f10967..0000000 --- a/DefaultData/2019_03_03_BCT/local_assortativity_wu_sign.m +++ /dev/null @@ -1,44 +0,0 @@ -function [loc_assort_pos,loc_assort_neg] = local_assortativity_wu_sign(W) -%LOCAL_ASSORTATIVITY_WU_SIGN Local Assortativity -% -% [loc_assort_pos,loc_assort_neg] = local_assortativity_wu_sign(W); -% -% Local Assortativity measures the extent to which nodes are connected to -% nodes of similar strength (vs. higher or lower strength). Adapted from -% Thedchanamoorthy et al. (2014)'s formula to allow weighted/signed -% networks (node degree replaced with node strength). Note, output values -% sum to total assortativity. -% -% Inputs: W, undirected connection matrix with positive and -% negative weights -% -% Output: loc_assort_pos, local assortativity from positive weights -% -% loc_assort_neg, local assortativity from negative weights -% -% Reference: Thedchanamoorthy G, Piraveenan M, Kasthuriratna D, -% Senanayake U. Proc Comp Sci (2014) 29:2449-2461. -% -% -% Jeff Spielberg, Boston University - -% Modification History: -% May 2015: Original - -W(1:(size(W,1)+1):end) = 0; -r_pos = assortativity_wei(W.*(W>0),0); -r_neg = assortativity_wei(-W.*(W<0),0); -[str_pos,str_neg] = strengths_und_sign(W); -loc_assort_pos = nan(size(W,1),1); -loc_assort_neg = nan(size(W,1),1); - -for curr_node = 1:size(W,1) - [~,j_pos] = find(W(curr_node,:)>0); - loc_assort_pos(curr_node,1) = sum(abs(str_pos(j_pos)-str_pos(curr_node)))/str_pos(curr_node); - - [~,j_neg] = find(W(curr_node,:)<0); - loc_assort_neg(curr_node,1) = sum(abs(str_neg(j_neg)-str_neg(curr_node)))/str_neg(curr_node); -end - -loc_assort_pos = ((r_pos+1)/size(W,1))-(loc_assort_pos/sum(loc_assort_pos)); -loc_assort_neg = ((r_neg+1)/size(W,1))-(loc_assort_neg/sum(loc_assort_neg)); diff --git a/DefaultData/2019_03_03_BCT/make_motif34lib.m b/DefaultData/2019_03_03_BCT/make_motif34lib.m deleted file mode 100755 index 9b2708c..0000000 --- a/DefaultData/2019_03_03_BCT/make_motif34lib.m +++ /dev/null @@ -1,90 +0,0 @@ -function make_motif34lib -%MAKE_MOTIF34LIB Auxiliary motif library function -% -% make_motif34lib; -% -% This function generates the motif34lib.mat library required for all -% other motif computations. -% -% -% Mika Rubinov, UNSW, 2007-2010 - -%#ok<*ASGLU> - -[M3,M3n,ID3,N3]=motif3generate; -[M4,M4n,ID4,N4]=motif4generate; -save motif34lib; - -function [M,Mn,ID,N]=motif3generate -n=0; -M=false(54,6); %isomorphs -CL=zeros(54,6,'uint8'); %canonical labels (predecessors of IDs) -cl=zeros(1,6,'uint8'); -for i=0:2^6-1 %loop through all subgraphs - m=dec2bin(i); - m=[num2str(zeros(1,6-length(m)), '%d') m]; %#ok - G=str2num ([ ... 
- '0' ' ' m(3) ' ' m(5) ; - m(1) ' ' '0' ' ' m(6) ; - m(2) ' ' m(4) ' ' '0' ]); %#ok - Ko=sum(G,2); - Ki=sum(G,1).'; - if all(Ko|Ki) %if subgraph weakly-connected - n=n+1; - cl(:)=sortrows([Ko Ki]).'; - CL(n,:)=cl; %assign motif label to isomorph - M(n,:)=G([2:4 6:8]); - end -end -[u1,u2,ID]=unique(CL,'rows'); %convert CLs into motif IDs - -%convert IDs into Sporns & Kotter classification -id_mika= [1 3 4 6 7 8 11]; -id_olaf= -[3 6 1 11 4 7 8]; -for id=1:length(id_mika) - ID(ID==id_mika(id))=id_olaf(id); -end -ID=abs(ID); - -[X,ind]=sortrows(ID); -ID=ID(ind,:); %sort IDs -M=M(ind,:); %sort isomorphs -N=sum(M,2); %number of edges -Mn=uint32(sum(repmat(10.^(5:-1:0),size(M,1),1).*M,2)); %M as a single number - -function [M,Mn,ID,N]=motif4generate -n=0; -M=false(3834,12); %isomorphs -CL=zeros(3834,16,'uint8'); %canonical labels (predecessors of IDs) -cl=zeros(1,16,'uint8'); -for i=0:2^12-1 %loop through all subgraphs - m=dec2bin(i); - m=[num2str(zeros(1,12-length(m)), '%d') m]; %#ok - G=str2num ([ ... - '0' ' ' m(4) ' ' m(7) ' ' m(10) ; - m(1) ' ' '0' ' ' m(8) ' ' m(11) ; - m(2) ' ' m(5) ' ' '0' ' ' m(12) ; - m(3) ' ' m(6) ' ' m(9) ' ' '0' ]); %#ok - Gs=G+G.'; - v=Gs(1,:); - for j=1:2 - v=any(Gs(v~=0,:),1)+v; - end - if v %if subgraph weakly connected - n=n+1; - G2=(G*G)~=0; - Ko=sum(G,2); - Ki=sum(G,1).'; - Ko2=sum(G2,2); - Ki2=sum(G2,1).'; - cl(:)=sortrows([Ki Ko Ki2 Ko2]).'; - CL(n,:)=cl; %assign motif label to isomorph - M(n,:)=G([2:5 7:10 12:15]); - end -end -[u1,u2,ID]=unique(CL,'rows'); %convert CLs into motif IDs -[X,ind]=sortrows(ID); -ID=ID(ind,:); %sort IDs -M=M(ind,:); %sort isomorphs -N=sum(M,2); %number of edges -Mn=uint64(sum(repmat(10.^(11:-1:0),size(M,1),1).*M,2)); %M as a single number \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/makeevenCIJ.m b/DefaultData/2019_03_03_BCT/makeevenCIJ.m deleted file mode 100755 index ae95f12..0000000 --- a/DefaultData/2019_03_03_BCT/makeevenCIJ.m +++ /dev/null @@ -1,75 +0,0 @@ -function [CIJ] = makeevenCIJ(N,K,sz_cl) -%MAKEEVENCIJ Synthetic modular small-world network -% -% CIJ = makeevenCIJ(N,K,sz_cl); -% -% This function generates a random, directed network with a specified -% number of fully connected modules linked together by evenly distributed -% remaining random connections. -% -% Inputs: N, number of vertices (must be power of 2) -% K, number of edges -% sz_cl, size of clusters (power of 2) -% -% Outputs: CIJ, connection matrix -% -% Notes: N must be a power of 2. -% A warning is generated if all modules contain more edges than K. 
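% --- Editor's illustrative sketch (not part of the deleted files) ---------------
% make_motif34lib (above) takes no arguments and writes motif34lib.mat to the
% current directory; per its help text, the other motif computations require
% that library. A one-time setup step (assumed workflow):
if ~exist('motif34lib.mat', 'file')
    make_motif34lib;                     % generates and saves the motif library
end
% --------------------------------------------------------------------------------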
-% Cluster size is 2^sz_cl; -% -% -% Olaf Sporns, Indiana University, 2005/2007 - -% compute number of hierarchical levels and adjust cluster size -mx_lvl = floor(log2(N)); -sz_cl = sz_cl-1; - -% make a stupid little template -t = ones(2).*2; - -% check N against number of levels -Nlvl = 2^mx_lvl; -if (Nlvl~=N) - disp('Warning: N must be a power of 2'); -end; -N = Nlvl; - -% create hierarchical template -for lvl=1:mx_lvl-1 - CIJ = ones(2^(lvl+1),2^(lvl+1)); - group1 = 1:size(CIJ,1)/2; - group2 = size(CIJ,1)/2+1:size(CIJ,1); - CIJ(group1,group1) = t; - CIJ(group2,group2) = t; - CIJ = CIJ+ones(size(CIJ,1),size(CIJ,1)); - t = CIJ; -end; -s = size(CIJ,1); -CIJ = CIJ-ones(s,s)-mx_lvl.*eye(s); - -% assign connection probabilities -%CIJp = mx_lvl-CIJ-sz_cl; -%CIJp = (CIJp>0).*CIJp; -CIJp = (CIJ>=(mx_lvl-sz_cl)); - -% determine number of remaining (non-cluster) connections and their -% possible positions -%CIJc = (CIJp==0); -CIJc = (CIJp==1); -remK = K-nnz(CIJc); -if (remK<0) - disp('Warning: K is too small, output matrix contains clusters only'); -end; -[a,b] = find(~(CIJc+eye(N))); - -% assign 'remK' randomly distributed connections -rp = randperm(length(a)); -a = a(rp(1:remK)); -b = b(rp(1:remK)); -for i=1:remK - CIJc(a(i),b(i)) = 1; -end; - -% prepare for output -CIJ = CIJc; - diff --git a/DefaultData/2019_03_03_BCT/makefractalCIJ.m b/DefaultData/2019_03_03_BCT/makefractalCIJ.m deleted file mode 100755 index f410586..0000000 --- a/DefaultData/2019_03_03_BCT/makefractalCIJ.m +++ /dev/null @@ -1,49 +0,0 @@ -function [CIJ,K] = makefractalCIJ(mx_lvl,E,sz_cl) -%MAKEFRACTALCIJ Synthetic hierarchical modular network -% -% [CIJ,K] = makefractalCIJ(mx_lvl,E,sz_cl); -% -% This function generates a directed network with a hierarchical modular -% organization. All modules are fully connected and connection density -% decays as 1/(E^n), with n = index of hierarchical level. -% -% Inputs: mx_lvl, number of hierarchical levels, N = 2^mx_lvl -% E, connection density fall-off per level -% sz_cl, size of clusters (power of 2) -% -% Outputs: CIJ, connection matrix -% K, number of connections present in the output CIJ -% -% -% Olaf Sporns, Indiana University, 2005/2007 - -% make a little template -t = ones(2).*2; - -% compute N and cluster size -N = 2^mx_lvl; -sz_cl = sz_cl-1; - -% n = [0 0 0:mx_lvl-3]; - -for lvl=1:mx_lvl-1 - CIJ = ones(2^(lvl+1),2^(lvl+1)); - group1 = 1:size(CIJ,1)/2; - group2 = size(CIJ,1)/2+1:size(CIJ,1); - CIJ(group1,group1) = t; - CIJ(group2,group2) = t; - CIJ = CIJ+ones(size(CIJ,1),size(CIJ,1)); - t = CIJ; -end; -s = size(CIJ,1); -CIJ = CIJ-ones(s,s)-mx_lvl.*eye(s); - -% assign connection probablities -ee = mx_lvl-CIJ-sz_cl; -ee = (ee>0).*ee; -prob = (1./(E.^ee)).*(ones(s,s)-eye(s)); -CIJ = (prob>rand(N)); - -% count connections -K = sum(sum(CIJ)); - diff --git a/DefaultData/2019_03_03_BCT/makelatticeCIJ.m b/DefaultData/2019_03_03_BCT/makelatticeCIJ.m deleted file mode 100755 index ae7e1c5..0000000 --- a/DefaultData/2019_03_03_BCT/makelatticeCIJ.m +++ /dev/null @@ -1,45 +0,0 @@ -function [CIJ] = makelatticeCIJ(N,K) -%MAKELATTICECIJ Synthetic lattice network -% -% CIJ = makelatticeCIJ(N,K); -% -% This function generates a directed lattice network without toroidal -% boundary counditions (i.e. no ring-like "wrapping around"). -% -% Inputs: N, number of vertices -% K, number of edges -% -% Outputs: CIJ, connection matrix -% -% Note: The lattice is made by placing connections as close as possible -% to the main diagonal, without wrapping around. No connections are made -% on the main diagonal. 
In/Outdegree is kept approx. constant at K/N. -% -% -% Olaf Sporns, Indiana University, 2005/2007 - -% initialize -CIJ = zeros(N); -CIJ1 = ones(N); -KK = 0; -cnt = 0; -seq = 1:N-1; - -% fill in -while (KK0) - [i,j] = find(dCIJ); - rp = randperm(length(i)); - for ii=1:overby - CIJ(i(rp(ii)),j(rp(ii))) = 0; - end; -end; diff --git a/DefaultData/2019_03_03_BCT/makerandCIJ_dir.m b/DefaultData/2019_03_03_BCT/makerandCIJ_dir.m deleted file mode 100755 index b1e07bd..0000000 --- a/DefaultData/2019_03_03_BCT/makerandCIJ_dir.m +++ /dev/null @@ -1,24 +0,0 @@ -function [CIJ] = makerandCIJ_dir(N,K) -%MAKERANDCIJ_DIR Synthetic directed random network -% -% CIJ = makerandCIJ_dir(N,K); -% -% This function generates a directed random network -% -% Inputs: N, number of vertices -% K, number of edges -% -% Output: CIJ, directed random connection matrix -% -% Note: no connections are placed on the main diagonal. -% -% -% Olaf Sporns, Indiana University, 2007/2008 - -ind = ~eye(N); -i = find(ind); -rp = randperm(length(i)); -irp = i(rp); - -CIJ = zeros(N); -CIJ(irp(1:K)) = 1; diff --git a/DefaultData/2019_03_03_BCT/makerandCIJ_und.m b/DefaultData/2019_03_03_BCT/makerandCIJ_und.m deleted file mode 100755 index e03dbf1..0000000 --- a/DefaultData/2019_03_03_BCT/makerandCIJ_und.m +++ /dev/null @@ -1,25 +0,0 @@ -function [CIJ] = makerandCIJ_und(N,K) -%MAKERANDCIJ_UND Synthetic directed random network -% -% CIJ = makerandCIJ_und(N,K); -% -% This function generates an undirected random network -% -% Inputs: N, number of vertices -% K, number of edges -% -% Output: CIJ, undirected random connection matrix -% -% Note: no connections are placed on the main diagonal. -% -% -% Olaf Sporns, Indiana University, 2007/2008 - -ind = triu(~eye(N)); -i = find(ind); -rp = randperm(length(i)); -irp = i(rp); - -CIJ = zeros(N); -CIJ(irp(1:K)) = 1; -CIJ = CIJ+CIJ'; % symmetrize diff --git a/DefaultData/2019_03_03_BCT/makerandCIJdegreesfixed.m b/DefaultData/2019_03_03_BCT/makerandCIJdegreesfixed.m deleted file mode 100755 index e163a3d..0000000 --- a/DefaultData/2019_03_03_BCT/makerandCIJdegreesfixed.m +++ /dev/null @@ -1,81 +0,0 @@ -function [cij,flag] = makerandCIJdegreesfixed(in,out) -%MAKERANDCIJDEGREESFIXED Synthetic directed random network -% -% CIJ = makerandCIJdegreesfixed(N,K); -% -% This function generates a directed random network with a specified -% in-degree and out-degree sequence. The function returns a flag, -% denoting whether the algorithm succeeded or failed. 
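% --- Editor's illustrative sketch (not part of the deleted files) ---------------
% Random graph generators above: both place exactly K edges off the diagonal.
% Size and edge count below are assumptions.
N = 100;  K = 400;
CIJdir = makerandCIJ_dir(N, K);           % directed:   nnz(CIJdir) == K
CIJund = makerandCIJ_und(N, K);           % undirected: nnz(triu(CIJund,1)) == K
% --------------------------------------------------------------------------------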
-% -% Inputs: in, indegree vector -% out, outdegree vector -% -% Output: CIJ, binary directed connectivity matrix -% flag, flag=1 if the algorithm succeeded; flag=0 otherwise -% -% -% Notes: Necessary conditions include: -% length(in) = length(out) = n -% sum(in) = sum(out) = k -% in(i), out(i) < n-1 -% in(i) + out(j) < n+2 -% in(i) + out(i) < n -% -% No connections are placed on the main diagonal -% -% -% Aviad Rubinstein, Indiana University 2005/2007 - -% intialize -n = length(in); -k = sum(in); -inInv = zeros(k,1); -outInv = inInv; -iIn = 1; iOut = 1; - -for i = 1:n - inInv(iIn:iIn+in(i) - 1) = i; - outInv(iOut:iOut+out(i) - 1) = i; - iIn = iIn+in(i); - iOut = iOut+out(i); -end - -cij = eye(n); -edges = [outInv(1:k)'; inInv(randperm(k))']; - -% create cij, and check for double edges and self-connections -for i = 1:k - if cij(edges(1,i),edges(2,i)) - warningCounter = 1; - while (1) - switchTo = ceil(k*rand); - if ~(cij(edges(1,i),edges(2,switchTo)) || cij(edges(1,switchTo),edges(2,i))) - cij(edges(1,i),edges(2,switchTo)) = 1; - if switchTo < i - cij(edges(1,switchTo),edges(2,switchTo)) = 0; - cij(edges(1,switchTo),edges(2,i)) = 1; - end - temp = edges(2,i); - edges(2,i) = edges(2,switchTo); - edges(2,switchTo) = temp; - break - end - warningCounter = warningCounter+1; - % If there is a legitimate subtitution, it has a probability of 1/k of being done. - % Thus it is highly unlikely that it will not be done after 2*k^2 attempts. - % This is an indication that the given indegree / outdegree - % vectors may not be possible. - if warningCounter == 2*k^2 - flag = 0; % no valid solution found - return; - end - end - else - cij(edges(1,i),edges(2,i)) = 1; - end -end - -cij = cij - eye(n); - -% a valid solution was found -flag = 1; \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/makeringlatticeCIJ.m b/DefaultData/2019_03_03_BCT/makeringlatticeCIJ.m deleted file mode 100755 index 9faf359..0000000 --- a/DefaultData/2019_03_03_BCT/makeringlatticeCIJ.m +++ /dev/null @@ -1,47 +0,0 @@ -function [CIJ] = makeringlatticeCIJ(N,K) -%MAKERINGLATTICECIJ Synthetic lattice network -% -% CIJ = makeringlatticeCIJ(N,K); -% -% This function generates a directed lattice network with toroidal -% boundary counditions (i.e. with ring-like "wrapping around"). -% -% Inputs: N, number of vertices -% K, number of edges -% -% Outputs: CIJ, connection matrix -% -% Note: The lattice is made by placing connections as close as possible -% to the main diagonal, with wrapping around. No connections are made -% on the main diagonal. In/Outdegree is kept approx. constant at K/N. -% -% -% Olaf Sporns, Indiana University, 2005/2007 - -% initialize -CIJ = zeros(N); -CIJ1 = ones(N); -KK = 0; -cnt = 0; -seq = 1:N-1; -seq2 = N-1:-1:1; - -% fill in -while (KK0) - [i,j] = find(dCIJ); - rp = randperm(length(i)); - for ii=1:overby - CIJ(i(rp(ii)),j(rp(ii))) = 0; - end; -end; diff --git a/DefaultData/2019_03_03_BCT/maketoeplitzCIJ.m b/DefaultData/2019_03_03_BCT/maketoeplitzCIJ.m deleted file mode 100755 index baf9424..0000000 --- a/DefaultData/2019_03_03_BCT/maketoeplitzCIJ.m +++ /dev/null @@ -1,29 +0,0 @@ -function [CIJ] = maketoeplitzCIJ(N,K,s) -%MAKETOEPLITZCIJ A synthetic directed network with Gaussian drop-off of -% connectivity with distance -% -% CIJ = maketoeprandCIJ(N,K,s) -% -% This function generates a directed network with a Gaussian drop-off in -% edge density with increasing distance from the main diagonal. There are -% toroidal boundary counditions (i.e. no ring-like "wrapping around"). 
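% --- Editor's illustrative sketch (not part of the deleted files) ---------------
% makerandCIJdegreesfixed (above) draws a directed graph with prescribed in- and
% out-degree sequences; flag == 0 means no valid wiring was found. The degree
% sequences below are assumptions chosen to satisfy the conditions in the help.
in  = [2 1 1 2 2];                        % assumed indegree sequence
out = [1 2 2 1 2];                        % assumed outdegree sequence (same sum)
[cij, flag] = makerandCIJdegreesfixed(in, out);
if flag
    ok = isequal(sum(cij,1), in) && isequal(sum(cij,2).', out);   % sanity check
end
% --------------------------------------------------------------------------------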
-% -% Inputs: N, number of vertices -% K, number of edges -% s, standard deviation of toeplitz -% -% Output: CIJ, connection matrix -% -% Note: no connections are placed on the main diagonal. -% -% -% Olaf Sporns, Indiana University, 2005/2007 - -profile = normpdf(1:N-1,0.5,s); -template = toeplitz([0 profile],[0 profile]); -template = template.*(K./sum(sum(template))); -CIJ = zeros(N); - -while ((sum(sum(CIJ)) ~= K)) - CIJ = (rand(N)tol - error('cannot find eigenvalue of 1. Minimum eigenvalue value is %0.6f. Tolerance was set at %0.6f',aux(index)+1,tol); -end - -w = V(:,index)'; %left-eigen vector associated to eigenvalue of 1. -w = w/sum(w); %rescale of left-eigen vector to the sum of it (hence is now in probabilites form. The inverse of this vector is the return-times vector - -W = repmat(w,n,1); %convert column-vector w to a full matrix W by making copies of w. -I = eye(n,n); %Identity matrix I is computed - -Z = inv(I-P+W); %Fundamental matrix Z is computed - -MFPT = (repmat(diag(Z)',n,1)-Z)./W; % this performs MFPT(i,j)=(Z(j,j)-Z(i,j))/w(j) in a matricial way. Corresponds to theorem 11.16 pag. 459 -% r = 1./w; %as demostrated in theorem 11.15 pag. 455. Each entry r_i is the 'mean-recurrence' or 'return-time' of state i (node i when states represent nodes of a graph) \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/mleme_constraint_model.m b/DefaultData/2019_03_03_BCT/mleme_constraint_model.m deleted file mode 100755 index c666737..0000000 --- a/DefaultData/2019_03_03_BCT/mleme_constraint_model.m +++ /dev/null @@ -1,284 +0,0 @@ -function [W0, E0, P0, Delt0] = mleme_constraint_model(samp, W, M, Lo, Li, Lm, opts) -%MLEME_CONSTRAINT_MODEL Unbiased sampling of networks with soft constraints -% -% W0 = mleme_constraint_model(samp, W); -% W0 = mleme_constraint_model(samp, W, M); -% W0 = mleme_constraint_model(samp, W, M, Lo, Li, Lm); -% [W0, E0, P0, Delt0] = mleme_constraint_model(samp, W, M, Lo, Li, Lm, opts); -% -% This function returns an ensemble of unbiasedly sampled networks with -% weighted node-strength and module-weight constraints. These constraints -% are soft in that they are satisfied on average for the full network -% ensemble but not, in general, for each individual network. -% -% Inputs (for a network with n nodes, m modules and c constraints): -% -% samp, Number of networks to sample. -% -% W, (length n) square directed and weighted connectivity -% matrix. All weights must be nonnegative integers. Note that -% real-valued weights may be converted to integers with -% arbitrary precision through rescaling and rounding, e.g. -% W_int = round(10^precision * W_real). -% -% M, (length n) module affiliation vector. This vector is often -% obtained as the output of a community detection algorithm. -% The vector must contain nonnegative integers, with zeros -% specifying nodes which are not part of any community. This -% input may be left empty if there are no module constraints. -% -% Lo, (length n) out-strength constraint logical vector. This -% vector specifies out-strength constraints for each node. -% Alternatively, it is possible to specify 1 to constrain all -% out-strengths or 0 for no constraints. Empty or no input -% results in default behavour (no constraints). -% -% Lo, (length n) in-strength constraint logical vector. This -% vector specifies in-strength constraints for each node. -% Alternatively, it is possible to specify 1 to constrain all -% in-strengths or 0 for no constraints. Empty or no input -% results in default behavour (no constraints). 
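% --- Editor's illustrative sketch (not part of the deleted files) ---------------
% maketoeplitzCIJ (above) resamples until the network has exactly K edges, with
% edge probability falling off as a Gaussian of the distance to the diagonal.
% Parameter values below are assumptions.
N = 128;  K = 600;  s = 10;               % assumed size, edge count, Gaussian SD
CIJ = maketoeplitzCIJ(N, K, s);           % sum(CIJ(:)) == K on return
% --------------------------------------------------------------------------------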
-% -% Lm, (length m) module-weight constraint logical matrix. This -% matrix specifies module-weight constraints for all pairs of -% modules. Alternatively, it is possible to specify -% 2 to constrain all inter-module and intra-module weights, -% 1 to constrain all intra-module weights, or 0 for no -% constraints. Empty or no input results in default behavour -% (no constraints). -% -% opts, optional argument: pass optimization and display options with optimset. -% Default: optimset('MaxFunEvals', 1e6*c, 'MaxIter', 1e6, 'Display', 'iter'); -% -% -% Outputs: -% W0, an ensemble of sampled networks with constraints. -% -% E0, expected weights matrix. -% -% P0, probability matrix. -% -% Delt0, algorithm convergence error. -% -% -% Algorithm: -% Maximum-likelihood estimation of network probability -% distribution by numerical solution of systems of nonlinear -% equations, and sampling of individual networks directly -% from this distribution. -% -% -% Notes: -% Empirical connection weights are -% not preserved. Constraint errors are guaranteed to vanish -% in the limit of the full network ensemble. -% -% -% Examples: -% % get community structure of a weighted network W -% M = community_louvain(W, 2); -% -% % specify node and module constraints -% n = length(W); % number of nodes -% m = max(M); % number of modules -% Lo = true(n, 1); % out-strength constraints -% Li = true(n, 1); % in-strength constraints -% Lm = eye(m); % module-weight constraints -% -% % sample networks with the above constraints -% [W0, E0, P0, Delt0] = mleme_constraint_model(samp, W, M, Lo, Li, Lm); -% -% % equivalent formulation -% [W0, E0, P0, Delt0] = mleme_constraint_model(samp, W, M, 1, 1, 1); -% -% % alternative: sample networks with average weight constraints only -% [W0, E0, P0, Delt0] = mleme_constraint_model(samp, W); -% -% -% References: Squartini and Garlaschelli (2011) New J Phys 13:083001 -% Rubinov (2016) Nat Commun 7:13812 -% -% -% 2016, Mika Rubinov, Janelia HHMI - -% Modification History -% Dec 2016: Original. 
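Since mleme_constraint_model requires nonnegative integer weights, a real-valued matrix is typically rescaled and rounded first, as the header above notes. A minimal sketch of that preprocessing and of the constraint-free call, using toy data and an arbitrary precision of 3 (both assumptions, not part of the removed source):

W_real = rand(30) .* (rand(30) > 0.8); W_real(1:31:end) = 0;  % toy directed weighted network (assumed example)
W_int  = round(1e3 * W_real);                                 % rescale and round to nonnegative integers (precision 3)
W0     = mleme_constraint_model(5, W_int);                    % 5 sampled networks, average-weight constraint only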
- -n = length(W); % number of nodes - -if ~exist('M', 'var') || isempty(M) - if exist('Lm', 'var') && any(Lm) - error('Need module affiliation vector for module constraints') - else - M = zeros(n, 1); - end -end - -m = max(M); % number of modules - -if ~isequal(W, int64(W)) || min(W(:))<0 - error('W must only contain nonnegative integers.') -end -if ~isequal(M, int64(M)) || min(M(:))<0 - error('M must only contain nonnegative integers.') -end - -% process node constraints -if ~exist('Lo','var') || isempty(Lo) || isequal(Lo,0) - Lo = false(n, 1); -elseif isequal(Lo, 1) - Lo = true(n, 1); -end -if ~exist('Li','var') - Li = Lo; -elseif isempty(Li) || isequal(Li, 0) - Li = false(n, 1); -elseif isequal(Li, 1) - Li = true(n, 1); -end - -% process module constraints -if ~exist('Lm','var') || isempty(Lm) || isequal(Lm,0) - Lm = false(m); -elseif isequal(Lm, 2) - Lm = true(m); -elseif isequal(Lm, 1) - Lm = diag(true(m, 1)); -end -if any(~M) - m = m + 1; - M(~M) = m; - Lm(m, m) = 0; % add a new row and column for nodes without modules -end - -Lo = logical(Lo(:)); -Li = logical(Li(:)); -Lm = logical(Lm(:)); -ao = numel(Lo); -ai = numel(Li); -am = numel(Lm); -uo = nnz(Lo); -ui = nnz(Li); -um = nnz(Lm); -Mij = bsxfun(@plus, M, (M.'-1)*m); - -f_ex = @(V) system_equations(V, Mij, Lo, Li, Lm, ao, ai, am, uo, ui, um); -f_cx = @(W) system_constraints(W, M, Lo, Li, Lm, uo, ui, um); - -C = f_cx(W); -c = 1 + uo + ui + um; -if ~exist('V','var') - V = mean2(W)/(1+mean2(W))*ones(c,1); -end - -assert(c == numel(C)); -assert(c == numel(V)); - -if ~exist('opts', 'var') || isempty(opts) - opts = optimset('MaxFunEvals', 1e6*c, 'MaxIter', 1e6, 'Display', 'iter'); -end - -V0 = fsolve(@(V) C - f_cx(f_ex(V)), V, opts); - -[E0, P0] = f_ex(V0); -Delt0 = C - f_cx(f_ex(V0)); - -W0 = sample_networks(P0, samp); - -end - - -function CellW0 = sample_networks(P0, samp) - -if ~exist('samp', 'var') - samp = 1; -end - -n = length(P0); - -CellW0 = cell(samp, 1); -for i = 1:samp - W0 = zeros(n); - L0 = ~eye(n); - l0 = nnz(L0); - while l0 - L0(L0) = P0(L0) > rand(l0,1); - W0(L0) = W0(L0) + 1; - l0 = nnz(L0); - end - CellW0{i} = W0; -end - -end - - -function [W, P] = system_equations(V, Mij, Lo, Li, Lm, ao, ai, am, uo, ui, um) - -X = ones(ao, 1); -Y = ones(ai, 1); -Z = ones(am, 1); - -if uo - offset = 1; - X(Lo) = V(offset + (1:uo)); -end -if ui - offset = 1 + uo; - Y(Li) = V(offset + (1:ui)); -end -if um - offset = 1 + uo + ui; - Z(Lm) = V(offset + (1:um)); -end - -P = V(1) .* (X * Y.') .* Z(Mij); % V(1) is the total weight -P(P>1) = 1 - eps; - -W = P ./ (1 - P); -W(1:length(W)+1:end) = 0; - -end - - -function C = system_constraints(W, M, Lo, Li, Lm, uo, ui, um) - -if nargin == 0 - C = @block_density; - return; -end - -if uo - So = sum(W(Lo,:), 2); -else - So = []; -end -if ui - Si = sum(W(:,Li), 1).'; -else - Si = []; -end -if um - Wm = block_density(W, M, Lm); -else - Wm = []; -end - -C = [sum(sum(W)); So; Si; Wm]; - -end - - -function Wm = block_density(W, M, Lwm) - -m = max(M); - -Wm = zeros(m*m, 1); -for u = 1:m - for v = 1:m - Wm(u + (v-1)*m) = sum(sum(W(M==u, M==v))); - end -end - -Wm = Wm(Lwm); - -end diff --git a/DefaultData/2019_03_03_BCT/modularity_dir.m b/DefaultData/2019_03_03_BCT/modularity_dir.m deleted file mode 100755 index acf0fff..0000000 --- a/DefaultData/2019_03_03_BCT/modularity_dir.m +++ /dev/null @@ -1,124 +0,0 @@ -function [Ci,Q]=modularity_dir(A,gamma) -%MODULARITY_DIR Optimal community structure and modularity -% -% Ci = modularity_dir(W); -% [Ci Q] = modularity_dir(W); -% -% The optimal community structure is 
a subdivision of the network into -% nonoverlapping groups of nodes in a way that maximizes the number of -% within-group edges, and minimizes the number of between-group edges. -% The modularity is a statistic that quantifies the degree to which the -% network may be subdivided into such clearly delineated groups. -% -% Inputs: -% W, -% directed weighted/binary connection matrix -% gamma, -% resolution parameter (optional) -% gamma>1, detects smaller modules -% 0<=gamma<1, detects larger modules -% gamma=1, classic modularity (default) -% -% Outputs: -% Ci, optimal community structure -% Q, maximized modularity -% -% Note: -% This algorithm is essentially deterministic. The only potential -% source of stochasticity occurs at the iterative finetuning step, in -% the presence of non-unique optimal swaps. However, the present -% implementation always makes the first available optimal swap and -% is therefore deterministic. -% -% References: -% Leicht and Newman (2008) Phys Rev Lett 100:118703. -% Reichardt and Bornholdt (2006) Phys Rev E 74:016110. -% -% 2008-2016 -% Mika Rubinov, UNSW -% Jonathan Power, WUSTL -% Dani Bassett, UCSB -% Xindi Wang, Beijing Normal University -% Roan LaPlante, Martinos Center for Biomedical Imaging - -% Modification History: -% Jul 2008: Original (Mika Rubinov) -% Oct 2008: Positive eigenvalues made insufficient for division (Jonathan Power) -% Dec 2008: Fine-tuning made consistent with Newman's description (Jonathan Power) -% Dec 2008: Fine-tuning vectorized (Mika Rubinov) -% Sep 2010: Node identities permuted (Dani Bassett) -% Dec 2013: Gamma resolution parameter included (Mika Rubinov) -% Dec 2013: Detection of maximum real part of eigenvalues enforced (Mika Rubinov) -% Thanks to Mason Porter and Jack Setford, University of Oxford -% Dec 2015: Single moves during fine-tuning enforced (Xindi Wang) -% Jan 2017: Removed node permutation and updated documentation (Roan LaPlante) - -if ~exist('gamma','var') - gamma = 1; -end - -N=length(A); %number of vertices -% n_perm = randperm(N); %DB: randomly permute order of nodes -% A = A(n_perm,n_perm); %DB: use permuted matrix for subsequent analysis -Ki=sum(A,1); %in-degree -Ko=sum(A,2); %out-degree -m=sum(Ki); %number of edges -b=A-gamma*(Ko*Ki).'/m; -B=b+b.'; %directed modularity matrix -Ci=ones(N,1); %community indices -cn=1; %number of communities -U=[1 0]; %array of unexamined communites - -ind=1:N; -Bg=B; -Ng=N; - -while U(1) %examine community U(1) - [V,D]=eig(Bg); - [~,i1]=max(real(diag(D))); %maximal positive (real part of) eigenvalue of Bg - v1=V(:,i1); %corresponding eigenvector - - S=ones(Ng,1); - S(v1<0)=-1; - q=S.'*Bg*S; %contribution to modularity - - if q>1e-10 %contribution positive: U(1) is divisible - qmax=q; %maximal contribution to modularity - Bg(logical(eye(Ng)))=0; %Bg is modified, to enable fine-tuning - indg=ones(Ng,1); %array of unmoved indices - Sit=S; - while any(indg) %iterative fine-tuning - Qit=qmax-4*Sit.*(Bg*Sit); %this line is equivalent to: - [qmax,imax]=max(Qit.*indg); %for i=1:Ng - Sit(imax)=-Sit(imax); % Sit(i)=-Sit(i); - indg(imax)=nan; % Qit(i)=Sit.'*Bg*Sit; - if qmax>q % Sit(i)=-Sit(i); - q=qmax; %end - S=Sit; - end - end - - if abs(sum(S))==Ng %unsuccessful splitting of U(1) - U(1)=[]; - else - cn=cn+1; - Ci(ind(S==1))=U(1); %split old U(1) into new U(1) and into cn - Ci(ind(S==-1))=cn; - U=[cn U]; %#ok - end - else %contribution nonpositive: U(1) is indivisible - U(1)=[]; - end - - ind=find(Ci==U(1)); %indices of unexamined community U(1) - bg=B(ind,ind); - 
Bg=bg-diag(sum(bg)); %modularity matrix for U(1) - Ng=length(ind); %number of vertices in U(1) -end - -s=Ci(:,ones(1,N)); %compute modularity -Q=~(s-s.').*B/(2*m); -Q=sum(Q(:)); -% Ci_corrected = zeros(N,1); % DB: initialize Ci_corrected -% Ci_corrected(n_perm) = Ci; % DB: return order of nodes to the order used at the input stage. -% Ci = Ci_corrected; % DB: output corrected community assignments diff --git a/DefaultData/2019_03_03_BCT/modularity_und.m b/DefaultData/2019_03_03_BCT/modularity_und.m deleted file mode 100755 index ad33fe0..0000000 --- a/DefaultData/2019_03_03_BCT/modularity_und.m +++ /dev/null @@ -1,122 +0,0 @@ -function [Ci,Q]=modularity_und(A,gamma) -%MODULARITY_UND Optimal community structure and modularity -% -% Ci = modularity_und(W); -% [Ci Q] = modularity_und(W,gamma); -% -% The optimal community structure is a subdivision of the network into -% nonoverlapping groups of nodes in a way that maximizes the number of -% within-group edges, and minimizes the number of between-group edges. -% The modularity is a statistic that quantifies the degree to which the -% network may be subdivided into such clearly delineated groups. -% -% Inputs: -% W, -% undirected weighted/binary connection matrix -% gamma, -% resolution parameter (optional) -% gamma>1, detects smaller modules -% 0<=gamma<1, detects larger modules -% gamma=1, classic modularity (default) -% -% Outputs: -% Ci, optimal community structure -% Q, maximized modularity -% -% Note: -% This algorithm is essentially deterministic. The only potential -% source of stochasticity occurs at the iterative finetuning step, in -% the presence of non-unique optimal swaps. However, the present -% implementation always makes the first available optimal swap and -% is therefore deterministic. -% -% References: -% Newman (2006) -- Phys Rev E 74:036104, PNAS 23:8577-8582. -% Reichardt and Bornholdt (2006) Phys Rev E 74:016110. 
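The modularity_dir code above takes a directed connection matrix and an optional resolution parameter. A minimal usage sketch on a toy random directed network (the data and the resolution value are assumptions, not part of the removed source):

A = double(rand(60) > 0.9); A(1:61:end) = 0;   % toy directed binary network, no self-connections (assumed example)
[Ci, Q]   = modularity_dir(A);                 % classic modularity, gamma = 1
[Ci2, Q2] = modularity_dir(A, 1.5);            % gamma > 1 detects smaller modules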
-% -% 2008-2016 -% Mika Rubinov, UNSW -% Jonathan Power, WUSTL -% Dani Bassett, UCSB -% Xindi Wang, Beijing Normal University -% Roan LaPlante, Martinos Center for Biomedical Imaging - -% Modification History: -% Jul 2008: Original (Mika Rubinov) -% Oct 2008: Positive eigenvalues made insufficient for division (Jonathan Power) -% Dec 2008: Fine-tuning made consistent with Newman's description (Jonathan Power) -% Dec 2008: Fine-tuning vectorized (Mika Rubinov) -% Sep 2010: Node identities permuted (Dani Bassett) -% Dec 2013: Gamma resolution parameter included (Mika Rubinov) -% Dec 2013: Detection of maximum real part of eigenvalues enforced (Mika Rubinov) -% Thanks to Mason Porter and Jack Setford, University of Oxford -% Dec 2015: Single moves during fine-tuning enforced (Xindi Wang) -% Jan 2017: Removed node permutation and updated documentation (Roan LaPlante) - -if ~exist('gamma','var') - gamma = 1; -end - -N=length(A); %number of vertices -% n_perm = randperm(N); %DB: randomly permute order of nodes -% A = A(n_perm,n_perm); %DB: use permuted matrix for subsequent analysis -K=sum(A); %degree -m=sum(K); %number of edges (each undirected edge is counted twice) -B=A-gamma*(K.'*K)/m; %modularity matrix -Ci=ones(N,1); %community indices -cn=1; %number of communities -U=[1 0]; %array of unexamined communites - -ind=1:N; -Bg=B; -Ng=N; - -while U(1) %examine community U(1) - [V,D]=eig(Bg); - [~,i1]=max(real(diag(D))); %maximal positive (real part of) eigenvalue of Bg - v1=V(:,i1); %corresponding eigenvector - - S=ones(Ng,1); - S(v1<0)=-1; - q=S.'*Bg*S; %contribution to modularity - - if q>1e-10 %contribution positive: U(1) is divisible - qmax=q; %maximal contribution to modularity - Bg(logical(eye(Ng)))=0; %Bg is modified, to enable fine-tuning - indg=ones(Ng,1); %array of unmoved indices - Sit=S; - while any(indg) %iterative fine-tuning - Qit=qmax-4*Sit.*(Bg*Sit); %this line is equivalent to: - [qmax,imax]=max(Qit.*indg); %for i=1:Ng - Sit(imax)=-Sit(imax); % Sit(i)=-Sit(i); - indg(imax)=nan; % Qit(i)=Sit.'*Bg*Sit; - if qmax>q % Sit(i)=-Sit(i); - q=qmax; %end - S=Sit; - end - end - - if abs(sum(S))==Ng %unsuccessful splitting of U(1) - U(1)=[]; - else - cn=cn+1; - Ci(ind(S==1))=U(1); %split old U(1) into new U(1) and into cn - Ci(ind(S==-1))=cn; - U=[cn U]; %#ok - end - else %contribution nonpositive: U(1) is indivisible - U(1)=[]; - end - - ind=find(Ci==U(1)); %indices of unexamined community U(1) - bg=B(ind,ind); - Bg=bg-diag(sum(bg)); %modularity matrix for U(1) - Ng=length(ind); %number of vertices in U(1) -end - -s=Ci(:,ones(1,N)); %compute modularity -Q=~(s-s.').*B/m; -Q=sum(Q(:)); -% Ci_corrected = zeros(N,1); % DB: initialize Ci_corrected -% Ci_corrected(n_perm) = Ci; % DB: return order of nodes to the order used at the input stage. -% Ci = Ci_corrected; % DB: output corrected community assignments diff --git a/DefaultData/2019_03_03_BCT/module_degree_zscore.m b/DefaultData/2019_03_03_BCT/module_degree_zscore.m deleted file mode 100755 index 4742b7d..0000000 --- a/DefaultData/2019_03_03_BCT/module_degree_zscore.m +++ /dev/null @@ -1,41 +0,0 @@ -function Z=module_degree_zscore(W,Ci,flag) -%MODULE_DEGREE_ZSCORE Within-module degree z-score -% -% Z=module_degree_zscore(W,Ci,flag); -% -% The within-module degree z-score is a within-module version of degree -% centrality. 
-% -% Inputs: W, binary/weighted, directed/undirected connection matrix -% Ci, community affiliation vector -% flag, 0, undirected graph (default) -% 1, directed graph: out-degree -% 2, directed graph: in-degree -% 3, directed graph: out-degree and in-degree -% -% Output: Z, within-module degree z-score. -% -% Reference: Guimera R, Amaral L. Nature (2005) 433:895-900. -% -% -% Mika Rubinov, UNSW, 2008-2010 - -if ~exist('flag','var') - flag=0; -end - -switch flag - case 0 % no action required - case 1 % no action required - case 2; W=W.'; - case 3; W=W+W.'; -end - -n=length(W); %number of vertices -Z=zeros(n,1); -for i=1:max(Ci) - Koi=sum(W(Ci==i,Ci==i),2); - Z(Ci==i)=(Koi-mean(Koi))./std(Koi); -end - -Z(isnan(Z))=0; \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif3funct_bin.m b/DefaultData/2019_03_03_BCT/motif3funct_bin.m deleted file mode 100755 index 74dafe7..0000000 --- a/DefaultData/2019_03_03_BCT/motif3funct_bin.m +++ /dev/null @@ -1,78 +0,0 @@ -function [f,F]=motif3funct_bin(A) -%MOTIF3FUNCT_BIN Frequency of functional class-3 motifs -% -% [f,F] = motif3funct_bin(A); -% -% *Structural motifs* are patterns of local connectivity in complex -% networks. In contrast, *functional motifs* are all possible subsets of -% patterns of local connectivity embedded within structural motifs. Such -% patterns are particularly diverse in directed networks. The motif -% frequency of occurrence around an individual node is known as the motif -% fingerprint of that node. The total motif frequency of occurrence in -% the whole network is correspondingly known as the motif fingerprint of -% the network. -% -% Input: A, binary directed connection matrix -% -% Output: F, node motif frequency fingerprint -% f, network motif frequency fingerprint -% -% Notes: -% 1. The function find_motif34.m outputs the motif legend. -% 2. There is a source of possible confusion in motif terminology. -% Motifs ("structural" and "functional") are most frequently -% considered only in the context of anatomical brain networks -% (Sporns and Kötter, 2004). On the other hand, motifs are not -% commonly studied in undirected networks, due to the paucity of -% local undirected connectivity patterns. -% -% References: Milo et al. 
(2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M3 ID3 N3 -if isempty(N3) - load motif34lib M3 ID3 N3 %load motif data -end - -n=length(A); %number of vertices in A -f=zeros(13,1); %motif count for whole graph -F=zeros(13,n); %frequency - -A=1*(A~=0); %adjacency matrix -As=A|A.'; %symmetrized adjacency - -for u=1:n-2 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=([false(1,v1) As(u,v1+1:n)])|V2; %and all neibs of u (>v1) - for v2=find(V2) - a=[A(v1,u);A(v2,u);A(u,v1);A(v2,v1);A(u,v2);A(v1,v2)]; - ind=(M3*a)==N3; %find all contained isomorphs - id=ID3(ind); - - [idu,j]=unique(id); %unique motif occurences - j=[0;j]; %#ok - mu=length(idu); %number of unique motifs - f2=zeros(mu,1); - - for h=1:mu %for each unique motif - f2(h)=j(h+1)-j(h); %and frequencies - end - - %then add to cumulative count - f(idu)=f(idu)+f2; - if nargout==2 - F(idu,[u v1 v2])=F(idu,[u v1 v2])+[f2 f2 f2]; - end - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif3funct_wei.m b/DefaultData/2019_03_03_BCT/motif3funct_wei.m deleted file mode 100755 index bab969d..0000000 --- a/DefaultData/2019_03_03_BCT/motif3funct_wei.m +++ /dev/null @@ -1,101 +0,0 @@ -function [I,Q,F]=motif3funct_wei(W) -%MOTIF3FUNCT_WEI Intensity and coherence of functional class-3 motifs -% -% [I,Q,F] = motif3funct_wei(W); -% -% *Structural motifs* are patterns of local connectivity in complex -% networks. In contrast, *functional motifs* are all possible subsets of -% patterns of local connectivity embedded within structural motifs. Such -% patterns are particularly diverse in directed networks. The motif -% frequency of occurrence around an individual node is known as the motif -% fingerprint of that node. The motif intensity and coherence are -% weighted generalizations of the motif frequency. The motif -% intensity is equivalent to the geometric mean of weights of links -% comprising each motif. The motif coherence is equivalent to the ratio -% of geometric and arithmetic means of weights of links comprising each -% motif. -% -% Input: W, weighted directed connection matrix -% (all weights must be between 0 and 1) -% -% Output: I, node motif intensity fingerprint -% Q, node motif coherence fingerprint -% F, node motif frequency fingerprint -% -% Notes: -% 1. The function find_motif34.m outputs the motif legend. -% 2. Average intensity and coherence are given by I./F and Q./F -% 3. All weights must be between 0 and 1. This may be achieved using -% the weight_conversion.m function, as follows: -% W_nrm = weight_conversion(W, 'normalize'); -% 4. There is a source of possible confusion in motif terminology. -% Motifs ("structural" and "functional") are most frequently -% considered only in the context of anatomical brain networks -% (Sporns and Kötter, 2004). On the other hand, motifs are not -% commonly studied in undirected networks, due to the paucity of -% local undirected connectivity patterns. -% -% References: Onnela et al. (2005), Phys Rev E 71:065103 -% Milo et al. 
(2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M3 ID3 N3 -if isempty(N3) - load motif34lib M3 ID3 N3 %load motif data -end - -n=length(W); %number of vertices in W -I=zeros(13,n); %intensity -Q=zeros(13,n); %coherence -F=zeros(13,n); %frequency - -A=1*(W~=0); %adjacency matrix -As=A|A.'; %symmetrized adjacency - -for u=1:n-2 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=([false(1,v1) As(u,v1+1:n)])|V2; %and all neibs of u (>v1) - for v2=find(V2) - w=[W(v1,u) W(v2,u) W(u,v1) W(v2,v1) W(u,v2) W(v1,v2)]; - a=[A(v1,u);A(v2,u);A(u,v1);A(v2,v1);A(u,v2);A(v1,v2)]; - ind=(M3*a)==N3; %find all contained isomorphs - m=sum(ind); %number of isomorphs - - M=M3(ind,:).*repmat(w,m,1); - id=ID3(ind); - l=N3(ind); - - x=sum(M,2)./l; %arithmetic mean - M(M==0)=1; %enable geometric mean - i=prod(M,2).^(1./l); %intensity - q=i./x; %coherence - - [idu,j]=unique(id); %unique motif occurences - j=[0;j]; %#ok - mu=length(idu); %number of unique motifs - i2=zeros(mu,1); - q2=i2; f2=i2; - - for h=1:mu %for each unique motif - i2(h)=sum(i(j(h)+1:j(h+1))); %sum all intensities, - q2(h)=sum(q(j(h)+1:j(h+1))); %coherences - f2(h)=j(h+1)-j(h); %and frequencies - end - - %then add to cumulative count - I(idu,[u v1 v2])=I(idu,[u v1 v2])+[i2 i2 i2]; - Q(idu,[u v1 v2])=Q(idu,[u v1 v2])+[q2 q2 q2]; - F(idu,[u v1 v2])=F(idu,[u v1 v2])+[f2 f2 f2]; - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif3struct_bin.m b/DefaultData/2019_03_03_BCT/motif3struct_bin.m deleted file mode 100755 index 3653a73..0000000 --- a/DefaultData/2019_03_03_BCT/motif3struct_bin.m +++ /dev/null @@ -1,56 +0,0 @@ -function [f,F]=motif3struct_bin(A) -%MOTIF3STRUCT_BIN Frequency of structural class-3 motifs -% -% [f,F] = motif3struct_bin(A); -% -% Structural motifs are patterns of local connectivity in complex -% networks. Such patterns are particularly diverse in directed networks. -% The motif frequency of occurrence around an individual node is known as -% the motif fingerprint of that node. The total motif frequency of -% occurrence in the whole network is correspondingly known as the -% motif fingerprint of the network. -% -% Input: A, binary directed connection matrix -% -% Output: F, node motif frequency fingerprint -% f, network motif frequency fingerprint -% -% Note: The function find_motif34.m outputs the motif legend. -% -% References: Milo et al. (2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M3n ID3 -if isempty(ID3) - load motif34lib M3n ID3 %load motif data -end - -n=length(A); %number of vertices in A -F=zeros(13,n); %motif count of each vertex -f=zeros(13,1); %motif count for whole graph -As=A|A.'; %symmetrized adjacency matrix - - -for u=1:n-2 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=([false(1,v1) As(u,v1+1:n)])|V2; %and all neibs of u (>v1) - for v2=find(V2) - - s=uint32(sum(10.^(5:-1:0).*[A(v1,u) A(v2,u) A(u,v1)... 
- A(v2,v1) A(u,v2) A(v1,v2)])); - ind=ID3(s==M3n); - if nargout==2; F(ind,[u v1 v2])=F(ind,[u v1 v2])+1; end - f(ind)=f(ind)+1; - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif3struct_wei.m b/DefaultData/2019_03_03_BCT/motif3struct_wei.m deleted file mode 100755 index 7d72068..0000000 --- a/DefaultData/2019_03_03_BCT/motif3struct_wei.m +++ /dev/null @@ -1,80 +0,0 @@ -function [I,Q,F]=motif3struct_wei(W) -%MOTIF3STRUCT_WEI Intensity and coherence of structural class-3 motifs -% -% [I,Q,F] = motif3struct_wei(W); -% -% Structural motifs are patterns of local connectivity in complex -% networks. Such patterns are particularly diverse in directed networks. -% The motif frequency of occurrence around an individual node is known as -% the motif fingerprint of that node. The motif intensity and coherence -% are weighted generalizations of the motif frequency. The motif -% intensity is equivalent to the geometric mean of weights of links -% comprising each motif. The motif coherence is equivalent to the ratio -% of geometric and arithmetic means of weights of links comprising each -% motif. -% -% Input: W, weighted directed connection matrix -% (all weights must be between 0 and 1) -% -% Output: I, node motif intensity fingerprint -% Q, node motif coherence fingerprint -% F, node motif frequency fingerprint -% -% Notes: -% 1. The function find_motif34.m outputs the motif legend. -% 2. Average intensity and coherence are given by I./F and Q./F -% 3. All weights must be between 0 and 1. This may be achieved using -% the weight_conversion.m function, as follows: -% W_nrm = weight_conversion(W, 'normalize'); -% -% References: Onnela et al. (2005), Phys Rev E 71:065103 -% Milo et al. (2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369% -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M3 M3n ID3 N3 -if isempty(N3) - load motif34lib M3 M3n ID3 N3 %load motif data -end - -n=length(W); %number of vertices in W -I=zeros(13,n); %intensity -Q=zeros(13,n); %coherence -F=zeros(13,n); %frequency - -A=1*(W~=0); %adjacency matrix -As=A|A.'; %symmetrized adjacency - -for u=1:n-2 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=([false(1,v1) As(u,v1+1:n)])|V2; %and all neibs of u (>v1) - for v2=find(V2) - w=[W(v1,u) W(v2,u) W(u,v1) W(v2,v1) W(u,v2) W(v1,v2)]; - s=uint32(sum(10.^(5:-1:0).*[A(v1,u) A(v2,u) A(u,v1)... - A(v2,v1) A(u,v2) A(v1,v2)])); - ind=(s==M3n); - - M=w.*M3(ind,:); - id=ID3(ind); - l=N3(ind); - x=sum(M,2)/l; %arithmetic mean - M(M==0)=1; %enable geometric mean - i=prod(M,2)^(1/l); %intensity - q=i/x; %coherence - - %then add to cumulative count - I(id,[u v1 v2])=I(id,[u v1 v2])+[i i i]; - Q(id,[u v1 v2])=Q(id,[u v1 v2])+[q q q]; - F(id,[u v1 v2])=F(id,[u v1 v2])+[1 1 1]; - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif4funct_bin.m b/DefaultData/2019_03_03_BCT/motif4funct_bin.m deleted file mode 100755 index 34b4e4c..0000000 --- a/DefaultData/2019_03_03_BCT/motif4funct_bin.m +++ /dev/null @@ -1,88 +0,0 @@ -function [f,F]=motif4funct_bin(A) -%MOTIF4FUNCT_BIN Frequency of functional class-4 motifs -% -% [f,F] = motif4funct_bin(A); -% -% *Structural motifs* are patterns of local connectivity in complex -% networks. 
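For the class-3 motif functions removed above, the weighted variant requires weights between 0 and 1, which the headers suggest obtaining via weight_conversion. A minimal sketch with toy data (the network itself is an assumption, not part of the removed source):

W = rand(20) .* (rand(20) > 0.85); W(1:21:end) = 0;   % sparse toy directed weighted network (assumed example)
W_nrm = weight_conversion(W, 'normalize');            % map weights into the required 0-1 range
[I, Q, F] = motif3funct_wei(W_nrm);                   % intensity, coherence and frequency fingerprints
avgI = I ./ F;                                        % average motif intensity (NaN where a motif never occurs)
[f, Fbin] = motif3funct_bin(double(W ~= 0));          % binary motif frequency fingerprints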
In contrast, *functional motifs* are all possible subsets of -% patterns of local connectivity embedded within structural motifs. Such -% patterns are particularly diverse in directed networks. The motif -% frequency of occurrence around an individual node is known as the motif -% fingerprint of that node. The total motif frequency of occurrence in -% the whole network is correspondingly known as the motif fingerprint of -% the network. -% -% Input: A, binary directed connection matrix -% -% Output: F, node motif frequency fingerprint -% f, network motif frequency fingerprint -% -% Notes: -% 1. The function find_motif34.m outputs the motif legend. -% 2. There is a source of possible confusion in motif terminology. -% Motifs ("structural" and "functional") are most frequently -% considered only in the context of anatomical brain networks -% (Sporns and Kötter, 2004). On the other hand, motifs are not -% commonly studied in undirected networks, due to the paucity of -% local undirected connectivity patterns. -% -% References: Milo et al. (2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M4 ID4 N4 -if isempty(N4) - load motif34lib M4 ID4 N4 %load motif data -end - -n=length(A); %number of vertices in A -f=zeros(199,1); -F=zeros(199,n); %frequency - -A=1*(A~=0); %adjacency matrix -As=A|A.'; %symmetrized adjacency - -for u=1:n-3 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=V2|([false(1,v1) As(u,v1+1:n)]); %and all neibs of u (>v1) - for v2=find(V2) - vz=max(v1,v2); %vz: largest rank node - V3=([false(1,u) As(v2,u+1:n)]); %v3: all neibs of v2 (>u) - V3(V2)=0; %not already in V1&V2 - V3=V3|([false(1,v2) As(v1,v2+1:n)]);%and all neibs of v1 (>v2) - V3(V1)=0; %not already in V1 - V3=V3|([false(1,vz) As(u,vz+1:n)]); %and all neibs of u (>vz) - for v3=find(V3) - - a=[A(v1,u);A(v2,u);A(v3,u);A(u,v1);A(v2,v1);A(v3,v1);... - A(u,v2);A(v1,v2);A(v3,v2);A(u,v3);A(v1,v3);A(v2,v3)]; - ind=(M4*a)==N4; %find all contained isomorphs - id=ID4(ind); - - [idu,j]=unique(id); %unique motif occurences - j=[0;j]; %#ok - mu=length(idu); %number of unique motifs - f2=zeros(mu,1); - - for h=1:mu %for each unique motif - f2(h)=j(h+1)-j(h); %and frequencies - end - - %then add to cumulative count - f(idu)=f(idu)+f2; - if nargout==2 - F(idu,[u v1 v2 v3])=F(idu,[u v1 v2 v3])+[f2 f2 f2 f2]; - end - end - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif4funct_wei.m b/DefaultData/2019_03_03_BCT/motif4funct_wei.m deleted file mode 100755 index c55e102..0000000 --- a/DefaultData/2019_03_03_BCT/motif4funct_wei.m +++ /dev/null @@ -1,111 +0,0 @@ -function [I,Q,F]=motif4funct_wei(W) -%MOTIF4FUNCT_WEI Intensity and coherence of functional class-4 motifs -% -% [I,Q,F] = motif4funct_wei(W); -% -% *Structural motifs* are patterns of local connectivity in complex -% networks. In contrast, *functional motifs* are all possible subsets of -% patterns of local connectivity embedded within structural motifs. Such -% patterns are particularly diverse in directed networks. The motif -% frequency of occurrence around an individual node is known as the motif -% fingerprint of that node. The motif intensity and coherence are -% weighted generalizations of the motif frequency. 
The motif -% intensity is equivalent to the geometric mean of weights of links -% comprising each motif. The motif coherence is equivalent to the ratio -% of geometric and arithmetic means of weights of links comprising each -% motif. -% -% Input: W, weighted directed connection matrix -% (all weights must be between 0 and 1) -% -% Output: I, node motif intensity fingerprint -% Q, node motif coherence fingerprint -% F, node motif frequency fingerprint -% -% Notes: -% 1. The function find_motif34.m outputs the motif legend. -% 2. Average intensity and coherence are given by I./F and Q./F -% 3. All weights must be between 0 and 1. This may be achieved using -% the weight_conversion.m function, as follows: -% W_nrm = weight_conversion(W, 'normalize'); -% 4. There is a source of possible confusion in motif terminology. -% Motifs ("structural" and "functional") are most frequently -% considered only in the context of anatomical brain networks -% (Sporns and Kötter, 2004). On the other hand, motifs are not -% commonly studied in undirected networks, due to the paucity of -% local undirected connectivity patterns. -% -% References: Onnela et al. (2005), Phys Rev E 71:065103 -% Milo et al. (2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369% -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M4 ID4 N4 -if isempty(N4) - load motif34lib M4 ID4 N4 %load motif data -end - -n=length(W); %number of vertices in W -I=zeros(199,n); %intensity -Q=zeros(199,n); %coherence -F=zeros(199,n); %frequency - -A=1*(W~=0); %adjacency matrix -As=A|A.'; %symmetrized adjacency - -for u=1:n-3 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=V2|([false(1,v1) As(u,v1+1:n)]); %and all neibs of u (>v1) - for v2=find(V2) - vz=max(v1,v2); %vz: largest rank node - V3=([false(1,u) As(v2,u+1:n)]); %v3: all neibs of v2 (>u) - V3(V2)=0; %not already in V1&V2 - V3=V3|([false(1,v2) As(v1,v2+1:n)]);%and all neibs of v1 (>v2) - V3(V1)=0; %not already in V1 - V3=V3|([false(1,vz) As(u,vz+1:n)]); %and all neibs of u (>vz) - for v3=find(V3) - - w=[W(v1,u) W(v2,u) W(v3,u) W(u,v1) W(v2,v1) W(v3,v1)... - W(u,v2) W(v1,v2) W(v3,v2) W(u,v3) W(v1,v3) W(v2,v3)]; - a=[A(v1,u);A(v2,u);A(v3,u);A(u,v1);A(v2,v1);A(v3,v1);... 
- A(u,v2);A(v1,v2);A(v3,v2);A(u,v3);A(v1,v3);A(v2,v3)]; - ind=(M4*a)==N4; %find all contained isomorphs - m=sum(ind); %number of isomorphs - - M=M4(ind,:).*repmat(w,m,1); - id=ID4(ind); - l=N4(ind); - x=sum(M,2)./l; %arithmetic mean - M(M==0)=1; %enable geometric mean - i=prod(M,2).^(1./l); %intensity - q=i./x; %coherence - - [idu,j]=unique(id); %unique motif occurences - j=[0;j]; %#ok - mu=length(idu); %number of unique motifs - i2=zeros(mu,1); - q2=i2; f2=i2; - - for h=1:mu %for each unique motif - i2(h)=sum(i(j(h)+1:j(h+1))); %sum all intensities, - q2(h)=sum(q(j(h)+1:j(h+1))); %coherences - f2(h)=j(h+1)-j(h); %and frequencies - end - - %then add to cumulative count - I(idu,[u v1 v2 v3])=I(idu,[u v1 v2 v3])+[i2 i2 i2 i2]; - Q(idu,[u v1 v2 v3])=Q(idu,[u v1 v2 v3])+[q2 q2 q2 q2]; - F(idu,[u v1 v2 v3])=F(idu,[u v1 v2 v3])+[f2 f2 f2 f2]; - end - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif4struct_bin.m b/DefaultData/2019_03_03_BCT/motif4struct_bin.m deleted file mode 100755 index 5e3bd7a..0000000 --- a/DefaultData/2019_03_03_BCT/motif4struct_bin.m +++ /dev/null @@ -1,65 +0,0 @@ -function [f,F]=motif4struct_bin(A) -%MOTIF4STRUCT_BIN Frequency of structural class-4 motifs -% -% [f,F] = motif4struct_bin(A); -% -% Structural motifs are patterns of local connectivity in complex -% networks. Such patterns are particularly diverse in directed networks. -% The motif frequency of occurrence around an individual node is known as -% the motif fingerprint of that node. The total motif frequency of -% occurrence in the whole network is correspondingly known as the -% motif fingerprint of the network. -% -% Input: A, binary directed connection matrix -% -% Output: F, node motif frequency fingerprint -% f, network motif frequency fingerprint -% -% Note: The function find_motif34.m outputs the motif legend. -% -% References: Milo et al. (2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - - -persistent M4n ID4 -if isempty(ID4) - load motif34lib M4n ID4 %load motif data -end - -n=length(A); %number of vertices in A -F=zeros(199,n); %motif count of each vertex -f=zeros(199,1); %motif count for whole graph -As=A|A.'; %symmetric adjacency matrix - -for u=1:n-3 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=V2|([false(1,v1) As(u,v1+1:n)]); %and all neibs of u (>v1) - for v2=find(V2) - vz=max(v1,v2); %vz: largest rank node - V3=([false(1,u) As(v2,u+1:n)]); %v3: all neibs of v2 (>u) - V3(V2)=0; %not already in V1&V2 - V3=V3|([false(1,v2) As(v1,v2+1:n)]);%and all neibs of v1 (>v2) - V3(V1)=0; %not already in V1 - V3=V3|([false(1,vz) As(u,vz+1:n)]); %and all neibs of u (>vz) - for v3=find(V3) - - s=uint64(sum(10.^(11:-1:0).*[A(v1,u) A(v2,u) A(v3,u)... - A(u,v1) A(v2,v1) A(v3,v1) A(u,v2) A(v1,v2)... 
- A(v3,v2) A(u,v3) A(v1,v3) A(v2,v3)])); - ind=ID4(s==M4n); - if nargout==2; F(ind,[u v1 v2 v3])=F(ind,[u v1 v2 v3])+1; end - f(ind)=f(ind)+1; - end - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/motif4struct_wei.m b/DefaultData/2019_03_03_BCT/motif4struct_wei.m deleted file mode 100755 index 2083596..0000000 --- a/DefaultData/2019_03_03_BCT/motif4struct_wei.m +++ /dev/null @@ -1,91 +0,0 @@ -function [I,Q,F]=motif4struct_wei(W) -%MOTIF4STRUCT_WEI Intensity and coherence of structural class-4 motifs -% -% [I,Q,F] = motif4struct_wei(W); -% -% Structural motifs are patterns of local connectivity in complex -% networks. Such patterns are particularly diverse in directed networks. -% The motif frequency of occurrence around an individual node is known as -% the motif fingerprint of that node. The motif intensity and coherence -% are weighted generalizations of the motif frequency. The motif -% intensity is equivalent to the geometric mean of weights of links -% comprising each motif. The motif coherence is equivalent to the ratio -% of geometric and arithmetic means of weights of links comprising each -% motif. -% -% Input: W, weighted directed connection matrix -% (all weights must be between 0 and 1) -% -% Output: I, node motif intensity fingerprint -% Q, node motif coherence fingerprint -% F, node motif frequency fingerprint -% -% Notes: -% 1. The function find_motif34.m outputs the motif legend. -% 2. Average intensity and coherence are given by I./F and Q./F -% 3. All weights must be between 0 and 1. This may be achieved using -% the weight_conversion.m function, as follows: -% W_nrm = weight_conversion(W, 'normalize'); -% -% References: Onnela et al. (2005), Phys Rev E 71:065103 -% Milo et al. (2002) Science 298:824-827 -% Sporns O, Kötter R (2004) PLoS Biol 2: e369% -% -% -% Mika Rubinov, UNSW/U Cambridge, 2007-2015 - -% Modification History: -% 2007: Original -% 2015: Improved documentation - -persistent M4 M4n ID4 N4 -if isempty(N4) - load motif34lib M4 M4n ID4 N4 %load motif data -end - -n=length(W); %number of vertices in W -I=zeros(199,n); %intensity -Q=zeros(199,n); %coherence -F=zeros(199,n); %frequency - -A=1*(W~=0); %adjacency matrix -As=A|A.'; %symmetrized adjacency - -for u=1:n-3 %loop u 1:n-2 - V1=[false(1,u) As(u,u+1:n)]; %v1: neibs of u (>u) - for v1=find(V1) - V2=[false(1,u) As(v1,u+1:n)]; %v2: all neibs of v1 (>u) - V2(V1)=0; %not already in V1 - V2=V2|([false(1,v1) As(u,v1+1:n)]); %and all neibs of u (>v1) - for v2=find(V2) - vz=max(v1,v2); %vz: largest rank node - V3=([false(1,u) As(v2,u+1:n)]); %v3: all neibs of v2 (>u) - V3(V2)=0; %not already in V1&V2 - V3=V3|([false(1,v2) As(v1,v2+1:n)]);%and all neibs of v1 (>v2) - V3(V1)=0; %not already in V1 - V3=V3|([false(1,vz) As(u,vz+1:n)]); %and all neibs of u (>vz) - for v3=find(V3) - - w=[W(v1,u) W(v2,u) W(v3,u) W(u,v1) W(v2,v1) W(v3,v1)... - W(u,v2) W(v1,v2) W(v3,v2) W(u,v3) W(v1,v3) W(v2,v3)]; - s=uint64(sum(10.^(11:-1:0).*[A(v1,u) A(v2,u) A(v3,u)... - A(u,v1) A(v2,v1) A(v3,v1) A(u,v2) A(v1,v2)... 
- A(v3,v2) A(u,v3) A(v1,v3) A(v2,v3)])); - ind=(s==M4n); - - M=w.*M4(ind,:); - id=ID4(ind); - l=N4(ind); - x=sum(M,2)/l; %arithmetic mean - M(M==0)=1; %enable geometric mean - i=prod(M,2)^(1/l); %intensity - q=i/x; %coherence - - %then add to cumulative count - I(id,[u v1 v2 v3])=I(id,[u v1 v2 v3])+[i i i i]; - Q(id,[u v1 v2 v3])=Q(id,[u v1 v2 v3])+[q q q q]; - F(id,[u v1 v2 v3])=F(id,[u v1 v2 v3])+[1 1 1 1]; - end - end - end -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/navigation_wu.m b/DefaultData/2019_03_03_BCT/navigation_wu.m deleted file mode 100755 index f3a1cae..0000000 --- a/DefaultData/2019_03_03_BCT/navigation_wu.m +++ /dev/null @@ -1,112 +0,0 @@ -function [sr, PL_bin, PL_wei, PL_dis, paths] = navigation_wu(L, D, max_hops) - -% Navigation of connectivity length matrix L guided by nodal distance D -% -% % Navigation -% [sr, PL_bin, PL_wei] = navigation_wu(L,D); -% % Binary shortest path length -% sp_PL_bin = distance_bin(L); -% % Weighted shortest path length -% sp_PL_wei = distance_wei_floyd(L); -% % Binary efficiency ratio -% er_bin = mean(mean(sp_PL_bin./PL_bin)); -% % Weighted efficiency ratio -% er_wei = mean(mean(sp_PL_wei./PL_wei)); -% -% *** Inputs: -% -% L - Weighted/unweighted directed/undirected NxN SC matrix of connection *lengths* -% L(i,j) is the strength-to-length remapping of the connection weight -% between i and j. L(i,j) = 0 denotes the lack of a connection between i -% and j. -% -% D - Symmetric NxN nodal distance matrix (e.g., Euclidean distance between node centroids) -% -% max_hops (optional) - Limits the maximum number of hops of navigation -% paths -% -% *** Outputs: -% -% sr - The success ratio (scalar) is the proportion of node pairs -% successfully reached by navigation. -% -% PL_bin - NxN matrix of binary navigation path length (i.e., number of hops in -% navigation paths). Infinte values indicate failed navigation paths. -% -% PL_wei - NxN matrix of weighted navigation path length (i.e., sum of connection -% weights--as defined by C--along navigaiton path). Infinte values indicate failed navigation paths. -% -% PL_dis - NxN matrix of distance-based navigation path length (i.e., sum of connection -% distances--as defined by D--along navigaiton path). Infinte values indicate failed navigation paths. -% -% paths - NxN cell of nodes comprising navigation paths. -% -% *** Reference: Seguin et al. (2018) PNAS. 
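navigation_wu expects a connection length matrix, so a weight matrix is first remapped to lengths (here with the inverse convention, l_ij = 1/w_ij). A minimal sketch with toy connectivity and node coordinates (all assumptions, not part of the removed source):

n = 20;
W = rand(n); W = (W + W.')/2; W(W < 0.6) = 0; W(1:n+1:end) = 0;      % toy undirected weighted network (assumed example)
L = zeros(n); L(W > 0) = 1 ./ W(W > 0);                              % strength-to-length remapping
xyz = rand(n, 3);                                                    % toy node coordinates (assumed example)
D = sqrt(sum((permute(xyz,[1 3 2]) - permute(xyz,[3 1 2])).^2, 3));  % Euclidean distance matrix
[sr, PL_bin, PL_wei] = navigation_wu(L, D);                          % success ratio and navigation path lengths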
-% -% Caio Seguin, University of Melbourne, 2017 - - if nargin == 2 - max_hops = length(L); - end - - N = size(L, 1); - paths = cell(N); - PL_bin = zeros(N); - PL_wei = zeros(N); - PL_dis = zeros(N); - - for i = 1:N - for j = 1:N - if (i ~= j) - - curr_node = i; - last_node = curr_node; - target = j; - paths{i,j} = curr_node; - - pl_bin = 0; - pl_wei = 0; - pl_dis = 0; - - while (curr_node ~= target) - - neighbors = find(L(curr_node,:) ~= 0); - - [~, min_index] = min(D(target, neighbors)); - - next_node = neighbors(min_index); - - if isempty(next_node) || next_node == last_node || pl_bin > max_hops - - pl_bin = Inf; - pl_wei = Inf; - pl_dis = Inf; - break; - - end - - paths{i,j} = [paths{i,j} next_node]; - pl_bin = pl_bin + 1; - pl_wei = L(curr_node, next_node) + pl_wei; - pl_dis = D(curr_node, next_node) + pl_dis; - - last_node = curr_node; - curr_node = next_node; - - end - - PL_bin(i,j) = pl_bin; - PL_wei(i,j) = pl_wei; - PL_dis(i,j) = pl_dis; - - end - end - end - - PL_bin(1:N+1:end) = Inf; - PL_wei(1:N+1:end) = Inf; - PL_dis(1:N+1:end) = Inf; - - sr = 1 - (length(find(PL_bin == Inf)) - N)/(N*N - N); - -end diff --git a/DefaultData/2019_03_03_BCT/null_model_dir_sign.m b/DefaultData/2019_03_03_BCT/null_model_dir_sign.m deleted file mode 100755 index 2332f30..0000000 --- a/DefaultData/2019_03_03_BCT/null_model_dir_sign.m +++ /dev/null @@ -1,188 +0,0 @@ -function [W0, R] = null_model_dir_sign(W,bin_swaps,wei_freq) -%NULL_MODEL_DIR_SIGN Directed random graphs with preserved weight, -% degree and strength distributions -% -% W0 = null_model_dir_sign(W); -% W0 = null_model_dir_sign(W,bin_swaps); -% W0 = null_model_dir_sign(W,bin_swaps,wei_freq); -% [W0 R] = null_model_dir_sign(W,bin_swaps,wei_freq); -% -% This function randomizes an directed network with positive and -% negative weights, while preserving the degree and strength -% distributions. This function calls randmio_dir_signed.m -% -% Inputs: W, Directed weighted connection matrix -% bin_swaps, Average number of swaps of each edge in binary randomization. -% bin_swap=5 is the default (each edge rewired 5 times) -% bin_swap=0 implies no binary randomization -% wei_freq, Frequency of weight sorting in weighted randomization -% wei_freq must be in the range of: 0 < wei_freq <= 1 -% wei_freq=1 implies that weights are sorted at each step -% (default in older [<2011] versions of MATLAB) -% wei_freq=0.1 implies that weights are sorted at each 10th step -% (faster, default in newer versions of MATLAB) -% -% Output: W0, Randomized weighted connection matrix -% R, Correlation coefficients between strength sequences -% of input and output connection matrices -% -% Notes: -% The value of bin_swaps is ignored when binary topology is fully -% connected (e.g. when the network has no negative weights). -% Randomization may be better (and execution time will be slower) for -% higher values of bin_swaps and wei_freq. Higher values of bin_swaps may -% enable a more random binary organization, and higher values of wei_freq -% may enable a more accurate conservation of strength sequences. -% R are the correlation coefficients between positive and negative -% in-strength and out-strength sequences of input and output connection -% matrices and are used to evaluate the accuracy with which strengths -% were preserved. Note that correlation coefficients may be a rough -% measure of strength-sequence accuracy and one could implement more -% formal tests (such as the Kolmogorov-Smirnov test) if desired. 
-% -% Example usage: -% -% %Create random directed weights matrix -% -% W=randn(100); -% -% %Compute one instance of null model (slow execution time): -% %bin_swaps=5, rewire each binary edge 5 times on average -% %wei_freq=1, sort all edges at every step -% -% tic; [W0_slow R_slow]=null_model_dir_sign(W,5,1); R_slow, toc -% -% R_slow = -% 0.9795 0.9724 0.9772 0.9773 -% Elapsed time is 3.485388 seconds. -% -% %Compute another instance of of null model (fast execution time): -% %bin_swaps=5, rewire each binary edge 5 times on average -% %wei_freq=0.1, sort all edges at every 10th step (10=1/0.1) -% -% tic; [W0_fast R_fast]=null_model_dir_sign(W,5,0.1); R_fast, toc -% -% R_fast = -% 0.9655 0.9652 0.9717 0.9804 -% Elapsed time is 0.763831 seconds. -% -% -% Reference: Rubinov and Sporns (2011) Neuroimage 56:2068-79 -% -% -% 2011-2015, Mika Rubinov, U Cambridge - -% Modification History -% Mar 2011: Original. -% Sep 2012: Edge-sorting acceleration. -% Dec 2015: Enforce preservation of negative degrees in sparse -% networks with negative weights (thanks to Andrew Zalesky). - -%#ok<*ASGLU> - -if ~exist('bin_swaps','var') - bin_swaps=5; -end -if ~exist('wei_freq','var') - if nargin('randperm')==1 - wei_freq=1; - else - wei_freq=0.1; - end -end - -if wei_freq<=0 || wei_freq>1 - error('wei_freq must be in the range of: 0 < wei_freq <= 1.') -end -if wei_freq && wei_freq<1 && nargin('randperm')==1 - warning('wei_freq may only equal 1 in older (<2011) versions of MATLAB.') - wei_freq=1; -end - -n=size(W,1); %number of nodes -W(1:n+1:end)=0; %clear diagonal -Ap = W>0; %positive adjacency matrix -An = W<0; %negative adjacency matrix - -if nnz(Ap)<(n*(n-1)) %if Ap is not full - W_r = randmio_dir_signed(W,bin_swaps); - Ap_r = W_r>0; - An_r = W_r<0; -else - Ap_r = Ap; - An_r = An; -end - -W0=zeros(n); %null model network -for s=[1 -1] - switch s %switch sign (positive/negative) - case 1 - Si=sum(W.*Ap,1).'; %positive in-strength - So=sum(W.*Ap,2); %positive out-strength - Wv=sort(W(Ap)); %sorted weights vector - [I, J]=find(Ap_r); %weights indices - Lij=n*(J-1)+I; %linear weights indices - case -1 - Si=sum(-W.*An,1).'; %negative in-strength - So=sum(-W.*An,2); %negative out-strength - Wv=sort(-W(An)); %sorted weights vector - [I, J]=find(An_r); %weights indices - Lij=n*(J-1)+I; %linear weights indices - end - - P=(So*Si.'); %expected weights matrix - - if wei_freq==1 - for m=numel(Wv):-1:1 %iteratively explore all weights - [dum, Oind]=sort(P(Lij)); %get indices of Lij that sort P - r=ceil(rand*m); - o=Oind(r); %choose random index of sorted expected weight - W0(Lij(o)) = s*Wv(r); %assign corresponding sorted weight at this index - - f = 1 - Wv(r)/So(I(o)); %readjust expected weight probabilities for node I(o) - P(I(o),:) = P(I(o),:)*f; %[1 - Wv(r)/S(I(o)) = (S(I(o)) - Wv(r))/S(I(o))] - f = 1 - Wv(r)/Si(J(o)); %readjust expected weight probabilities for node J(o) - P(:,J(o)) = P(:,J(o))*f; %[1 - Wv(r)/S(J(o)) = (S(J(o)) - Wv(r))/S(J(o))] - - So(I(o)) = So(I(o)) - Wv(r); %readjust in-strength of node I(o) - Si(J(o)) = Si(J(o)) - Wv(r); %readjust out-strength of node J(o) - Lij(o)=[]; %remove current index from further consideration - I(o)=[]; - J(o)=[]; - Wv(r)=[]; %remove current weight from further consideration - end - else - wei_period = round(1/wei_freq); %convert frequency to period - for m=numel(Wv):-wei_period:1 %iteratively explore at the given period - [dum, Oind]=sort(P(Lij)); %get indices of Lij that sort P - R=randperm(m,min(m,wei_period)).'; - - O=Oind(R); %choose random index of sorted 
expected weight - W0(Lij(O)) = s*Wv(R); %assign corresponding sorted weight at this index - - WAi = accumarray(I(O),Wv(R),[n,1]); - Iu = any(WAi,2); - F = 1 - WAi(Iu)./So(Iu); %readjust expected weight probabilities for node I(o) - P(Iu,:) = P(Iu,:).*F(:,ones(1,n)); %[1 - Wv(r)/S(I(o)) = (S(I(o)) - Wv(r))/S(I(o))] - So(Iu) = So(Iu) - WAi(Iu); %readjust in-strength of node I(o) - - WAj = accumarray(J(O),Wv(R),[n,1]); - Ju = any(WAj,2); - F = 1 - WAj(Ju)./Si(Ju); %readjust expected weight probabilities for node J(o) - P(:,Ju) = P(:,Ju).*F(:,ones(1,n)).'; %[1 - Wv(r)/S(J(o)) = (S(J(o)) - Wv(r))/S(J(o))] - Si(Ju) = Si(Ju) - WAj(Ju); %readjust out-strength of node J(o) - - O=Oind(R); - Lij(O)=[]; %remove current index from further consideration - I(O)=[]; - J(O)=[]; - Wv(R)=[]; %remove current weight from further consideration - end - end -end - -rpos_in=corrcoef(sum( W.*(W>0),1), sum( W0.*(W0>0),1) ); -rpos_ou=corrcoef(sum( W.*(W>0),2), sum( W0.*(W0>0),2) ); -rneg_in=corrcoef(sum(-W.*(W<0),1), sum(-W0.*(W0<0),1) ); -rneg_ou=corrcoef(sum(-W.*(W<0),2), sum(-W0.*(W0<0),2) ); -R=[rpos_in(2) rpos_ou(2) rneg_in(2) rneg_ou(2)]; diff --git a/DefaultData/2019_03_03_BCT/null_model_und_sign.m b/DefaultData/2019_03_03_BCT/null_model_und_sign.m deleted file mode 100755 index 7653df1..0000000 --- a/DefaultData/2019_03_03_BCT/null_model_und_sign.m +++ /dev/null @@ -1,181 +0,0 @@ -function [W0,R] = null_model_und_sign(W,bin_swaps,wei_freq) -%NULL_MODEL_UND_SIGN Random graphs with preserved weight, degree and -% strength distributions -% -% W0 = null_model_und_sign(W); -% W0 = null_model_und_sign(W,bin_swaps); -% W0 = null_model_und_sign(W,bin_swaps,wei_freq); -% [W0 R] = null_model_und_sign(W,bin_swaps,wei_freq); -% -% This function randomizes an undirected network with positive and -% negative weights, while preserving the degree and strength -% distributions. This function calls randmio_und_signed.m -% -% Inputs: W, Undirected weighted connection matrix -% bin_swaps, Average number of swaps of each edge in binary randomization. -% bin_swap=5 is the default (each edge rewired 5 times) -% bin_swap=0 implies no binary randomization -% wei_freq, Frequency of weight sorting in weighted randomization -% wei_freq must be in the range of: 0 < wei_freq <= 1 -% wei_freq=1 implies that weights are resorted at each step -% (default in older [<2011] versions of MATLAB) -% wei_freq=0.1 implies that weights are sorted at each 10th step -% (faster, default in newer versions of Matlab) -% -% Output: W0, Randomized weighted connection matrix -% R, Correlation coefficient between strength sequences -% of input and output connection matrices -% -% Notes: -% The value of bin_swaps is ignored when binary topology is fully -% connected (e.g. when the network has no negative weights). -% Randomization may be better (and execution time will be slower) for -% higher values of bin_swaps and wei_freq. Higher values of bin_swaps may -% enable a more random binary organization, and higher values of wei_freq -% may enable a more accurate conservation of strength sequences. -% R are the correlation coefficients between positive and negative -% strength sequences of input and output connection matrices and are -% used to evaluate the accuracy with which strengths were preserved. Note -% that correlation coefficients may be a rough measure of -% strength-sequence accuracy and one could implement more formal tests -% (such as the Kolmogorov-Smirnov test) if desired. 
-% -% Example usage: -% -% %Create random weights matrix -% -% W=tril(randn(100),-1); W=W+W.'; -% -% %Compute one instance of null model (slow execution time): -% %bin_swaps=5, rewire each binary edge 5 times on average -% %wei_freq=1, sort all edges at every step -% -% tic; [W0_slow R_slow]=null_model_und_sign(W,5,1); R_slow, toc -% -% R_slow = -% 0.9720 0.9746 -% Elapsed time is 2.112715 seconds. -% -% %Compute another instance of of null model (fast execution time): -% %bin_swaps=5, rewire each binary edge 5 times on average -% %wei_freq=0.1, sort all edges at every 10th step (10=1/0.1) -% -% tic; [W0_fast R_fast]=null_model_und_sign(W,5,0.1); R_fast, toc -% -% R_fast = -% 0.9623 0.9789 -% Elapsed time is 0.584797 seconds. -% -% -% Reference: Rubinov and Sporns (2011) Neuroimage 56:2068-79 -% -% -% 2011-2015, Mika Rubinov, U Cambridge - -% Modification History -% Mar 2011: Original -% Sep 2012: Edge-sorting acceleration -% Dec 2015: Enforce preservation of negative degrees in sparse -% networks with negative weights (thanks to Andrew Zalesky). - -%#ok<*ASGLU> - -if ~exist('bin_swaps','var') - bin_swaps=5; -end -if ~exist('wei_freq','var') - if nargin('randperm')==1 - wei_freq=1; - else - wei_freq=0.1; - end -end - -if wei_freq<=0 || wei_freq>1 - error('wei_freq must be in the range of: 0 < wei_freq <= 1.') -end -if wei_freq && wei_freq<1 && nargin('randperm')==1 - warning('wei_freq may only equal 1 in older (<2011) versions of MATLAB.') - wei_freq=1; -end - -n=size(W,1); %number of nodes -W(1:n+1:end)=0; %clear diagonal -Ap = W>0; %positive adjacency matrix -An = W<0; %negative adjacency matrix - -if nnz(Ap)<(n*(n-1)) %if Ap is not full - W_r = randmio_und_signed(W,bin_swaps); - Ap_r = W_r>0; - An_r = W_r<0; -else - Ap_r = Ap; - An_r = An; -end - -W0=zeros(n); %null model network -for s=[1 -1] - switch s %switch sign (positive/negative) - case 1 - S=sum(W.*Ap,2); %positive strength - Wv=sort(W(triu(Ap))); %sorted weights vector - [I,J]=find(triu(Ap_r)); %weights indices - Lij=n*(J-1)+I; %linear weights indices - case -1 - S=sum(-W.*An,2); %negative strength - Wv=sort(-W(triu(An))); %sorted weights vector - [I,J]=find(triu(An_r)); %weights indices - Lij=n*(J-1)+I; %linear weights indices - end - - P=(S*S.'); %expected weights matrix - - if wei_freq==1 - for m=numel(Wv):-1:1 %iteratively explore all weights - [dum,Oind]=sort(P(Lij)); %get indices of Lij that sort P - r=ceil(rand*m); - o=Oind(r); %choose random index of sorted expected weight - W0(Lij(o)) = s*Wv(r); %assign corresponding sorted weight at this index - - f = 1 - Wv(r)/S(I(o)); %readjust expected weight probabilities for node I(o) - P(I(o),:) = P(I(o),:)*f; %[1 - Wv(r)/S(I(o)) = (S(I(o)) - Wv(r))/S(I(o))] - P(:,I(o)) = P(:,I(o))*f; - f = 1 - Wv(r)/S(J(o)); %readjust expected weight probabilities for node J(o) - P(J(o),:) = P(J(o),:)*f; %[1 - Wv(r)/S(J(o)) = (S(J(o)) - Wv(r))/S(J(o))] - P(:,J(o)) = P(:,J(o))*f; - - S([I(o) J(o)]) = S([I(o) J(o)])-Wv(r); %readjust strengths of nodes I(o) and J(o) - Lij(o)=[]; %remove current index from further consideration - I(o)=[]; - J(o)=[]; - Wv(r)=[]; %remove current weight from further consideration - end - else - wei_period = round(1/wei_freq); %convert frequency to period - for m=numel(Wv):-wei_period:1 %iteratively explore at the given period - [dum,Oind]=sort(P(Lij)); %get indices of Lij that sort P - R=randperm(m,min(m,wei_period)).'; - O = Oind(R); - W0(Lij(O)) = s*Wv(R); %assign corresponding sorted weight at this index - - WA = accumarray([I(O);J(O)],Wv([R;R]),[n,1]); %cumulative 
weight - IJu = any(WA,2); - F = 1-WA(IJu)./S(IJu); - F = F(:,ones(1,n)); %readjust expected weight probabilities for node I(o) - P(IJu,:) = P(IJu,:).*F; %[1 - Wv(r)/S(I(o)) = (S(I(o)) - Wv(r))/S(I(o))] - P(:,IJu) = P(:,IJu).*F.'; - S(IJu) = S(IJu)-WA(IJu); %re-adjust strengths of nodes I(o) and J(o) - - O=Oind(R); - Lij(O)=[]; %remove current index from further consideration - I(O)=[]; - J(O)=[]; - Wv(R)=[]; %remove current weight from further consideration - end - end -end -W0=W0+W0.'; - -rpos=corrcoef(sum( W.*(W>0)),sum( W0.*(W0>0))); -rneg=corrcoef(sum(-W.*(W<0)),sum(-W0.*(W0<0))); -R=[rpos(2) rneg(2)]; diff --git a/DefaultData/2019_03_03_BCT/pagerank_centrality.m b/DefaultData/2019_03_03_BCT/pagerank_centrality.m deleted file mode 100755 index 94a76d2..0000000 --- a/DefaultData/2019_03_03_BCT/pagerank_centrality.m +++ /dev/null @@ -1,57 +0,0 @@ -function r = pagerank_centrality(A, d, falff) -%PAGERANK_CENTRALITY PageRank centrality -% -% r = pagerank_centrality(A, d, falff) -% -% The PageRank centrality is a variant of eigenvector centrality. This -% function computes the PageRank centrality of each vertex in a graph. -% -% Formally, PageRank is defined as the stationary distribution achieved -% by instantiating a Markov chain on a graph. The PageRank centrality of -% a given vertex, then, is proportional to the number of steps (or amount -% of time) spent at that vertex as a result of such a process. -% -% The PageRank index gets modified by the addition of a damping factor, -% d. In terms of a Markov chain, the damping factor specifies the -% fraction of the time that a random walker will transition to one of its -% current state's neighbors. The remaining fraction of the time the -% walker is restarted at a random vertex. A common value for the damping -% factor is d = 0.85. -% -% Inputs: A, adjacency matrix -% d, damping factor -% falff, initial page rank probability (non-negative) -% -% Outputs: r, vectors of page rankings -% -% Note: The algorithm will work well for smaller matrices (number of -% nodes around 1000 or less) -% -% References: -% -% [1]. GeneRank: Using search engine technology for the analysis of -% microarray experiments, by Julie L. Morrison, Rainer Breitling, Desmond -% J. Higham and David R. Gilbert, BMC Bioinformatics, 6:233, 2005. -% [2]. Boldi P, Santini M, Vigna S (2009) PageRank: Functional -% dependencies. ACM Trans Inf Syst 27, 1-23. -% -% Xi-Nian Zuo, Institute of Psychology, Chinese Academy of Sciences, 2011 -% Rick Betzel, Indiana University, 2012 - -N = size(A,1); -if nargin < 3 - norm_falff = ones(N,1)/N; -else - falff = abs(falff); - norm_falff = falff/sum(falff); -end - -deg = sum(A); -ind = (deg == 0); -deg(ind) = 1; -D1 = zeros(N); -D1(1:(N+1):end) = 1./deg; -B = eye(N) - d*(A*D1); -b = (1-d)*norm_falff; -r = B\b; -r = r/sum(r); \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/participation_coef.m b/DefaultData/2019_03_03_BCT/participation_coef.m deleted file mode 100755 index 0ae4eb3..0000000 --- a/DefaultData/2019_03_03_BCT/participation_coef.m +++ /dev/null @@ -1,49 +0,0 @@ -function P=participation_coef(W,Ci,flag) -%PARTICIPATION_COEF Participation coefficient -% -% P = participation_coef(W,Ci); -% -% Participation coefficient is a measure of diversity of intermodular -% connections of individual nodes. 
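pagerank_centrality, whose code appears above, takes an adjacency matrix, a damping factor and an optional restart distribution. A minimal sketch using the commonly cited d = 0.85 and the default uniform restart (the toy graph is an assumption, not part of the removed source):

A = double(rand(100) > 0.95); A(1:101:end) = 0;    % toy directed adjacency matrix (assumed example)
r = pagerank_centrality(A, 0.85);                  % PageRank scores, normalized to sum to 1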
-% -% Inputs: W, binary/weighted, directed/undirected connection matrix -% Ci, community affiliation vector -% flag, 0, undirected graph (default) -% 1, directed graph: out-degree -% 2, directed graph: in-degree -% -% Output: P, participation coefficient -% -% Reference: Guimera R, Amaral L. Nature (2005) 433:895-900. -% -% -% 2008-2015 -% Mika Rubinov, UNSW/U Cambridge -% Alex Fornito, University of Melbourne - -% Modification History: -% Jul 2008: Original (Mika Rubinov) -% Mar 2011: Weighted-network bug fixes (Alex Fornito) -% Jan 2015: Generalized for in- and out-degree (Mika Rubinov) - -if ~exist('flag','var') - flag=0; -end - -switch flag - case 0 % no action required - case 1 % no action required - case 2; W=W.'; -end - -n=length(W); %number of vertices -Ko=sum(W,2); %degree -Gc=(W~=0)*diag(Ci); %neighbor community affiliation -Kc2=zeros(n,1); %community-specific neighbors - -for i=1:max(Ci) - Kc2=Kc2+(sum(W.*(Gc==i),2).^2); -end - -P=ones(n,1)-Kc2./(Ko.^2); -P(~Ko)=0; %P=0 if for nodes with no (out)neighbors diff --git a/DefaultData/2019_03_03_BCT/participation_coef_sign.m b/DefaultData/2019_03_03_BCT/participation_coef_sign.m deleted file mode 100755 index 7568efa..0000000 --- a/DefaultData/2019_03_03_BCT/participation_coef_sign.m +++ /dev/null @@ -1,47 +0,0 @@ -function [Ppos,Pneg]=participation_coef_sign(W,Ci) -%PARTICIPATION_COEF_SIGN Participation coefficient -% -% [Ppos Pneg] = participation_coef_sign(W,Ci); -% -% Participation coefficient is a measure of diversity of intermodular -% connections of individual nodes. -% -% Inputs: W, undirected connection matrix with positive and -% negative weights -% -% Ci, community affiliation vector -% -% Output: Ppos, participation coefficient from positive weights -% -% Pneg, participation coefficient from negative weights -% -% Reference: Guimera R, Amaral L. Nature (2005) 433:895-900. -% -% -% 2011, Mika Rubinov, UNSW - -% Modification History: -% Mar 2011: Original -% Sep 2012: Fixed treatment of nodes with no negative strength -% (thanks to Alex Fornito and Martin Monti) - - -n=length(W); %number of vertices - -Ppos = pcoef( W.*(W>0)); -Pneg = pcoef(-W.*(W<0)); - - function P=pcoef(W_) - S = sum(W_,2); %strength - Gc = (W_~=0)*diag(Ci); %neighbor community affiliation - Sc2 = zeros(n,1); %community-specific neighbors - - for i = 1:max(Ci) - Sc2 = Sc2 + (sum(W_.*(Gc==i),2).^2); - end - - P = ones(n,1) - Sc2./(S.^2); - P(isnan(P)) = 0; - P(~P) = 0; %p_ind=0 if no (out)neighbors - end -end diff --git a/DefaultData/2019_03_03_BCT/partition_distance.m b/DefaultData/2019_03_03_BCT/partition_distance.m deleted file mode 100755 index 6c4fadf..0000000 --- a/DefaultData/2019_03_03_BCT/partition_distance.m +++ /dev/null @@ -1,97 +0,0 @@ -function [VIn, MIn] = partition_distance(Cx, Cy) -%PARTITION_DISTANCE Distance or similarity between community partitions -% -% This function quantifies information-theoretic distance (normalized -% variation of information) or similarity (normalized mutual information) -% between community partitions. -% -% VIn = partition_distance(Cx); -% VIn = partition_distance(Cx, Cy); -% [VIn, MIn] = partition_distance(Cx, Cy); -% -% Inputs: -% Cx, -% Community partition vector or matrix of n rows and p columns, -% n is the number of network nodes, and p is the number of input -% community partitions (in the case of vector input p=1). -% -% Cy (optional argument), -% Community partition vector or matrix of n rows and q columns. 
n -% is the number of nodes (must be equal to the number of nodes in -% Cq) and q is the number of input community partitions (may be -% different to the number of nodes in Cq). This argument may be -% omitted, in which case, the partition distance is computed -% between all pairwise partitions of Cx. -% -% Outputs: -% VIn, -% Normalized variation of information ([p, q] matrix) -% -% MIn, -% Normalized mutual information ([p, q] matrix) -% -% Notes: -% Mathematical definitions. -% -% VIn = [H(X) + H(Y) - 2MI(X, Y)]/log(n) -% MIn = 2MI(X, Y) / [H(X) + H(Y)] -% -% where H is the entropy and MI is the mutual information -% -% -% Reference: Meila M (2007) J Multivar Anal 98, 873-895. -% -% -% 2011-2017, Mika Rubinov, UNSW, Janelia HHMI - -% Modification History: -% Mar 2011: Original -% Jan 2017: Added computation between input matrices. - -s = (nargin==1); -if s - Cy = Cx; - d = 10.^ceil(log10(double(1 + max( Cx(:)) ))); -else - d = 10.^ceil(log10(double(1 + max([Cx(:);Cy(:)]) ))); -end - -if ~isequal([Cx(:);Cy(:)], int64([Cx(:);Cy(:)])) || min([Cx(:);Cy(:)])<=0 - error('Input partitions must contain only positive integers.') -end - -[n, p] = size(Cx); -HX = zeros(p, 1); -for i = 1:p - Px = nonzeros(accumarray(Cx(:, i), 1)) / n; % P(x) - HX(i) = - sum(Px .* log(Px)); % H(x) -end - -if s - q = p; - HY = HX; -else - [n_, q] = size(Cy); - assert(n == n_); - HY = zeros(q, 1); - for j = 1:q - Py = nonzeros(accumarray(Cy(:, j), 1)) / n; % P(y) - HY(j) = - sum(Py .* log(Py)); % H(y) - end -end - -VIn = zeros(p, q); -MIn = zeros(p, q); -for i = 1:p - j_idx = (s * (i - 1) + 1):q; - for j = j_idx - Pxy = nonzeros(accumarray(d*Cx(:, i) + Cy(:, j), 1)) / n; % P(x,y) - Hxy = -sum(Pxy .* log(Pxy)); % H(x,y) - VIn(i, j) = (2 * Hxy - HX(i) - HY(j)) / log(n); % VIn - MIn(i, j) = 2 * (HX(i) + HY(j) - Hxy) / (HX(i) + HY(j)); % MIn - end - if s - VIn(j_idx, i) = VIn(i, j_idx); - MIn(j_idx, i) = MIn(i, j_idx); - end -end diff --git a/DefaultData/2019_03_03_BCT/path_transitivity.m b/DefaultData/2019_03_03_BCT/path_transitivity.m deleted file mode 100755 index 4cbe692..0000000 --- a/DefaultData/2019_03_03_BCT/path_transitivity.m +++ /dev/null @@ -1,84 +0,0 @@ -function T=path_transitivity(W,transform) -% PATH_TRANSITIVITY Transitivity based on shortest paths -% -% T = path_transitivity(W,transform) -% -% This function computes the density of local detours (triangles) that -% are available along the shortest-paths between all pairs of nodes. -% -% Inputs: -% -% W, -% unweighted/weighted undirected connection *weight* OR *length* -% matrix. -% -% -% transform, -% If the input matrix is a connection *weight* matrix, specify a -% transform that map input connection weights to connection -% lengths. Two transforms are available. -% 'log' -> l_ij = -log(w_ij) -% 'inv' -> l_ij = 1/w_ij -% -% If the input matrix is a connection *length* matrix, do not -% specify a transform (or specify an empty transform argument). -% -% -% Output: -% -% T, -% matrix of pairwise path transitivity. 
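For reference, a hedged usage sketch of path_transitivity as documented above, on a made-up weighted undirected matrix; it assumes the toolbox functions in this repository (including distance_wei_floyd.m) are on the MATLAB path:
rng(1);                                   % arbitrary seed, illustration only
W = rand(20); W = triu(W,1); W = W + W';  % symmetric toy *weight* matrix, zero diagonal
W(W < 0.5) = 0;                           % sparsify; kept dense enough to stay connected
T = path_transitivity(W,'inv');           % 'inv' maps weights to lengths l_ij = 1/w_ij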
-% -% -% Olaf Sporns, Andrea Avena-Koenigsberger and Joaquin Goñi, IU Bloomington, 2014 -% -% References: Goñi et al (2014) PNAS doi: 10.1073/pnas.131552911 -% - -if ~exist('transform','var') - transform = []; -end - -n=length(W); -m=zeros(n,n); -T=zeros(n,n); - -for i=1:n-1 - for j=i+1:n - x=0; - y=0; - z=0; - for k=1:n - if W(i,k)~=0 && W(j,k)~=0 && k~=i && k~=j - x=x+W(i,k)+W(j,k); - end - if k~=j - y=y+W(i,k); - end - if k~=i - z=z+W(j,k); - end - end - m(i,j)=x/(y+z); - end -end -m=m+m'; - -[~,hops,Pmat] = distance_wei_floyd(W,transform); - -% --- path transitivity ---%% -for i=1:n-1 - for j=i+1:n - x=0; - path = retrieve_shortest_path(i,j,hops,Pmat); - K=length(path); - - for t=1:K-1 - for l=t+1:K - x=x+m(path(t),path(l)); - end - end - T(i,j)=2*x/(K*(K-1)); - end -end -T=T+T'; diff --git a/DefaultData/2019_03_03_BCT/predict_fc.m b/DefaultData/2019_03_03_BCT/predict_fc.m deleted file mode 100755 index d7fcc74..0000000 --- a/DefaultData/2019_03_03_BCT/predict_fc.m +++ /dev/null @@ -1,228 +0,0 @@ -function [FCpre,FCcorr,beta,pred_data,R] = predict_fc(SC,FC,ED,pred_var,model) -% PREDICT_FC Prediction of functional connectivity from structural connectivity -% -% [FCpre,FCcorr,beta,pred_data,R] = predict_fc(SC,FC,ED,pred_var,model) -% [FCpre,FCcorr,beta] = predict_fc(SC,FC,[],{'SPLwei_log','SIwei_log'},'quadratic') -% -% This function computes regression coefficients to predict FC from -% structural-based measures that are used as predictor variables. -% -% Inputs: -% -% SC, -% Weighted/unweighted undirected NxN Structural Connectivity matrix. -% -% FC, -% Functional connections. FC can be a NxN symmetric matrix or an -% ((N*(N-1))/2) x 1 vector containing the upper triangular -% elements of the square FC matrix (excluding diagonal elements). -% -% ED, -% Euclidean distance matrix or upper triangular vector of the -% matrix (optional) -% -% pred_var, -% Set of M predictors. These can be given as an KxM array where -% K = ((N*(N-1))/2) and M is the number of predictors. -% Alternatively, pred_var can be a cell with the names of network -% measures to be used as predictors. Accepted network measure -% names are: -% SPLbin - Shortest-path length (binary) -% SPLwei_inv - Shortest-path length computed with an inv transform -% SPLwei_log - Shortest-path length computed with a log transform -% SPLdist - Shortest-path length computed with no transform -% SIbin - Search Information of binary shortest-paths -% SIwei_inv - Search Information of shortest-paths computed with an inv transform -% SIwei_log - Search Information of shortest-paths computed with a log transform -% SIdist - Search Information of shortest-paths computed with no transform -% T - Path Transitivity -% deltaMFPT - Column-wise z-scored mean first passage time -% neighOverlap - Neighborhood Overlap -% MI - Matching Index -% -% If no predictors are specified, the defaults are {'SPLwei_log', 'SIwei_log'}. -% -% model, -% Specifies the order of the regression model used within -% matlab's function regstats.m. 'model' can be any option -% accepted by matlab's regstats.m function (e.g. 'linear', -% 'interaction', 'quadratic', etc.) If no model is specified, -% 'linear' is the default. -% -% Output: -% -% FCpre, -% Predicted NxN Functional Connectivity matrix -% -% FCcorr, -% Pearson Correlation between PCpred and FC -% -% beta, -% Regression Coefficients -% -% pred_data, -% KxM array of predictors. -% -% R, -% Output from regstats.m (e.g. 'beta', 'yhat', 'rsquare', -% 'adjrsquare', 'tstat', 'r', 'mse', 'standres'). 
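A hedged usage sketch with surrogate data (random SC and FC, arbitrary sizes and thresholds); it assumes the predictor functions listed above and MATLAB's Statistics Toolbox (for regstats) are available, and it is not a validated analysis pipeline:
rng(2); N = 30;
SC = rand(N); SC = (SC + SC')/2;          % toy structural matrix
SC(1:N+1:end) = 0; SC(SC < 0.5) = 0;      % zero diagonal, sparsify
FC = corrcoef(randn(200,N));              % surrogate functional connectivity
% Predict FC from shortest-path length (log transform) and path transitivity
[FCpre,FCcorr,beta] = predict_fc(SC,FC,[],{'SPLwei_log','T'},'linear');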
-% -% -% References: Goñi et al (2014) PNAS, 833–838, doi: 10.1073/pnas.1315529111 -% -% -% Andrea Avena-Koenigsberger and Joaquin Goñi, IU Bloomington, 2014 - -% Modification history -% 2012: Original -% 2016: Added more predictors and possibility of accepting predictor -% names as input. - -pred_names = {'SPLbin','SPLwei_inv','SPLwei_log','SPLdist','SIbin',... - 'SIwei_inv','SIwei_log','SIdist','T','deltaMFPT','neighOverlap','MI'}; - -N = size(SC,1); -indx = find(triu(ones(N),1)); - -% select model -if ~exist('model','var') - model = 'linear'; -end - -if ~exist('pred_var','var') && ~isempty(ED) - pred_var = {'ED','SPLwei_log','SI','T'}; - flag_var_names = true; - flag_ED = true; -elseif ~exist('pred_var','var') && isempty(ED) - pred_var = {'SPLwei_log','SI','T'}; - flag_var_names = true; -elseif exist('pred_var','var') && ~isnumeric(pred_var) && ~isempty(ED) - flag_var_names = true; - flag_ED = true; -elseif exist('pred_var','var') && ~isnumeric(pred_var) && isempty(ED) - flag_var_names = true; - flag_ED = false; -elseif exist('pred_var','var') && isnumeric(pred_var) && ~isempty(ED) - flag_var_names = false; - flag_ED = true; -elseif exist('pred_var','var') && isnumeric(pred_var) && isempty(ED) - flag_var_names = false; - flag_ED = false; -else - err_str = '"pred_var" must be an KxM array of M predictors, or any of the following graph-measure names:'; - s1 = sprintf('SPLbin - Shortest-path length (binary) \n'); - s2 = sprintf('SPLwei_inv - Shortest-path length computed with an inv transform \n'); - s3 = sprintf('SPLwei_log - Shortest-path length computed with a log transform \n'); - s4 = sprintf('SPLdist - Shortest-path length computed with no transform \n'); - s5 = sprintf('SIbin - Search Information of binary shortest-paths \n'); - s6 = sprintf('SIwei_inv - Search Information of shortest-paths computed with an inv transform \n'); - s7 = sprintf('SIwei_log - Search Information of shortest-paths computed with a log transform \n'); - s8 = sprintf('SIdist - Search Information of shortest-paths computed with no transform \n'); - s9 = sprintf('T - Path Transitivity \n'); - s10 = sprintf('deltaMFPT - Column-wise z-scored mean first passage time \n'); - s11 = sprintf('neighOverlap - Neighborhood Overlap \n'); - s12 = sprintf('MI - Matching Index \n'); - error('%s \n %s %s %s %s %s %s %s %s %s %s %s %s',err_str,s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12); -end - -if flag_ED - [n1,n2] = size(ED); - if n1 == n2 && n1 == N - % square ED matrix - pred_data = ED(indx); - elseif n1 == length(indx) && n2 == 1 - % ED is already an upper-triangle vector - pred_data = ED; - else - error('ED must be square matrix or a vector containing the upper triangle of the square ED matrix \n') - end -else - pred_data = []; -end - - -if flag_var_names - fprintf('\n----------------------'); - fprintf('\n Selected predictors: \n'); - ind2start = size(pred_data,2); - pred_data = [pred_data,zeros(length(indx),length(pred_var))]; - - for v = 1:length(pred_var) - var_ind = find(strcmp(pred_var{v},pred_names)); - switch var_ind - - case 1 %SPLbin - fprintf('Shortest-path length (binary) \n\n'); - data = distance_wei_floyd(double(SC>0)); - case 2 %SPLwei_inv - fprintf('Shortest-path length computed with an inv transform \n'); - data = distance_wei_floyd(SC,'inv'); - case 3 %SPLwei_log - fprintf('Shortest-path length computed with a log transform \n'); - data = distance_wei_floyd(SC,'log'); - case 4 %SPLdist - fprintf('Shortest-path length computed with no transform \n'); - data = distance_wei_floyd(SC); - case 5 %SIbin - 
fprintf('Search Information of binary shortest-paths \n'); - data = search_information(double(SC>0)); - data = data + data'; - case 6 %SIwei_inv - fprintf('Search Information of shortest-paths computed with an inv transform \n'); - data = search_information(SC,'inv'); - data = data + data'; - case 7 %SIwei_log - fprintf('Search Information of shortest-paths computed with a log transform \n'); - data = search_information(SC,'log'); - data = data + data'; - case 8 %SIdist - fprintf('Search Information of shortest-paths computed with no transform \n'); - data = search_information(SC); - data = data + data'; - case 9 %T - fprintf('Path Transitivity \n'); - data = path_transitivity(double(SC>0)); - case 10 %deltaMFPT - fprintf('Column-wise z-scored mean first passage time \n'); - mfpt = mean_first_passage_time(SC); - deltamfpt = zscore(mfpt,[],1); - data = deltamfpt+deltamfpt'; - case 11 %neighOverlap - fprintf('Neighborhood Overlap \n'); - data = double(SC>0) * double(SC>0)'; - case 12 %MI - fprintf('Matching Index \n'); - data = matching_ind(SC); - otherwise - error('This is not an accepted predictor. See list of available predictors \n') - end - pred_data(:,ind2start+v) = data(indx); - end -else - if size(pred_var,1) == length(indx) - pred_data = [pred_data,pred_var]; - else - error('Custom predictors must be provided as KxM array of M predictors \n'); - end -end - - -[n1,n2] = size(FC); -if n1 == n2 && n1 == N - % square FC matrix - responses = FC(indx); -elseif n1 == length(indx) && n2 == 1 - % FC is already an upper-triangle vector - responses = FC; -else - error('FC must be square matrix or a vector containing the upper triangle (no diagonal elements) of the square FC matrix \n') -end - -% run multilinear model -R = regstats(responses,pred_data,model,{'beta','yhat','rsquare','adjrsquare','tstat','r','mse','standres'}); -beta = R.beta; -FCpre = zeros(size(SC)); -FCpre(indx) = R.yhat; -FCpre = FCpre+FCpre'; -FCcorr = corr(responses,FCpre(indx)); - diff --git a/DefaultData/2019_03_03_BCT/quasi_idempotence.m b/DefaultData/2019_03_03_BCT/quasi_idempotence.m deleted file mode 100755 index 591f4d1..0000000 --- a/DefaultData/2019_03_03_BCT/quasi_idempotence.m +++ /dev/null @@ -1,110 +0,0 @@ -function [XN,IOTA,EPS,U]=quasi_idempotence(X,K) -%QUASI_IDEMPOTENCE Connection matrix quasi-idempotence -% -% [XN,IOTA,EPS,U]=quasi_idempotence(X) -% [XN,IOTA,EPS,U]=quasi_idempotence(X,K) -% -% The degree of quasi-idempotence of a matrix represents how close it -% is to being idempotent, i.e. invariant to squaring. In turn, this -% reflects how closely related the edges in the graph it corresponds to -% are to the sums of all triangles between the corresponding nodes, -% spanning the entirety of the network. This probes a form of -% self-similarity, intended not as between nodes and modules, but -% between as edges and triangles (or more generally paths). -% Networks wherein the edge strengths are weakly related to the -% nodal strengths have low quasi-idempotence, and networks wherein the -% edge strengths are significantly influenced by the strengths of the -% corresponding nodes have high quasi-idempotence. In other words the -% degree of quasi-idempotence may be viewed as a measure of -% "collectivity", meaning how strongly individual nodes participate -% in collective dynamics. 
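A minimal sketch of the quantity this measures: correlate the off-diagonal entries of a norm-scaled matrix with those of its re-normalized square, i.e. one iteration of the procedure; the toy matrix and seed are made up, and corr requires the Statistics Toolbox, as in the original function:
rng(3); N = 50;
X = abs(randn(N)); X = (X + X')/2;   % non-negative symmetric toy matrix
X(1:N+1:end) = 0;                    % null the diagonal
X = X/norm(X);                       % scale to unit norm
X2 = X^2; X2 = X2/norm(X2);          % square and re-normalize
mask  = triu(true(N),1);             % superdiagonal entries
iota1 = corr(X(mask),X2(mask));      % first-order quasi-idempotence coefficient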
-% -% Inputs: -% X, -% undirected weighted/binary connection matrix with -% non-negative weights, -% K, -% number of iterations (optional), -% K0)=0; % null the diagonal in the initial matrix -X=X/norm(X); % normalize to unit norm -XN=X; -mask=triu(ones(N),1)>0; % create mask for superdiagonal elements -U=0; % initialize iterations counter -IOTA=[]; % this vector will contain the correlation coefficients -EPS=inf; % and this will contain the errors -if isinf(K) - while EPS(end)>eps('double') % iterate until error below precision - U=U+1; % increase iteration counter - XN_hat=XN; % save the initial matrix - XN=XN^2; % square the matrix - XN=XN/norm(XN); % normalize it again (for numerical reasons) - IOTA(end+1)=corr(X(mask),XN(mask)); % calculate correlation coefficient - EPS(end+1)=norm(XN_hat-XN); % calculate error - end -else - while U2011) version of MATLAB.') -end - -R = double(W); % sign function requires double input -n = size(R,1); -ITER=ITER*n*(n-1); - -% maximal number of rewiring attempts per 'iter' -maxAttempts=n; -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - %select four distinct vertices - nodes = randperm(n,4); - a = nodes(1); - b = nodes(2); - c = nodes(3); - d = nodes(4); - - r0_ab = R(a,b); - r0_cd = R(c,d); - r0_ad = R(a,d); - r0_cb = R(c,b); - - %rewiring condition - if (sign(r0_ab)==sign(r0_cd)) && ... - (sign(r0_ad)==sign(r0_cb)) && ... - (sign(r0_ab)~=sign(r0_ad)) - - R(a,d)=r0_ab; R(a,b)=r0_ad; - R(c,b)=r0_cd; R(c,d)=r0_cb; - - eff = eff+1; - break; - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/randmio_und.m b/DefaultData/2019_03_03_BCT/randmio_und.m deleted file mode 100755 index 91ef5e1..0000000 --- a/DefaultData/2019_03_03_BCT/randmio_und.m +++ /dev/null @@ -1,80 +0,0 @@ -function [R,eff]=randmio_und(R, ITER) -%RANDMIO_UND Random graph with preserved degree distribution -% -% R = randmio_und(W,ITER); -% [R eff]=randmio_und(W, ITER); -% -% This function randomizes an undirected network, while preserving the -% degree distribution. The function does not preserve the strength -% distribution in weighted networks. 
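A hedged usage sketch of randmio_und on a made-up binary undirected graph, checking that each node's degree is preserved by the rewiring:
rng(4);
A = double(rand(30) < 0.2); A = triu(A,1); A = A + A';   % toy binary undirected graph
[R,eff] = randmio_und(A,5);          % each edge rewired approximately 5 times
assert(isequal(sum(A),sum(R)));      % degree of every node is unchanged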
-% -% Input: W, undirected (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% -% Output: R, randomized network -% eff, number of actual rewirings carried out -% -% References: Maslov and Sneppen (2002) Science 296:910 -% -% -% 2007-2012 -% Mika Rubinov, UNSW -% Jonathan Power, WUSTL -% Olaf Sporns, IU - -% Modification History: -% Jun 2007: Original (Mika Rubinov) -% Apr 2008: Edge c-d is flipped with 50% probability, allowing to explore -% all potential rewirings (Jonathan Power) -% Mar 2012: Limit number of rewiring attempts, count number of successful -% rewirings (Olaf Sporns) - - -n=size(R,1); -[i,j]=find(tril(R)); -K=length(i); -ITER=K*ITER; - -% maximal number of rewiring attempts per 'iter' -maxAttempts= round(n*K/(n*(n-1))); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - while 1 - e1=ceil(K*rand); - e2=ceil(K*rand); - while (e2==e1) - e2=ceil(K*rand); - end - a=i(e1); b=j(e1); - c=i(e2); d=j(e2); - - if all(a~=[c d]) && all(b~=[c d]) - break %all four vertices must be different - end - end - - if rand>0.5 - i(e2)=d; j(e2)=c; %flip edge c-d with 50% probability - c=i(e2); d=j(e2); %to explore all potential rewirings - end - - %rewiring condition - if ~(R(a,d) || R(c,b)) - R(a,d)=R(a,b); R(a,b)=0; - R(d,a)=R(b,a); R(b,a)=0; - R(c,b)=R(c,d); R(c,d)=0; - R(b,c)=R(d,c); R(d,c)=0; - - j(e1) = d; %reassign edge indices - j(e2) = b; - eff = eff+1; - break; - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/randmio_und_connected.m b/DefaultData/2019_03_03_BCT/randmio_und_connected.m deleted file mode 100755 index 8d146f5..0000000 --- a/DefaultData/2019_03_03_BCT/randmio_und_connected.m +++ /dev/null @@ -1,107 +0,0 @@ -function [R,eff] = randmio_und_connected(R, ITER) -%RANDMIO_UND_CONNECTED Random graph with preserved degree distribution -% -% R = randmio_und_connected(W,ITER); -% [R eff] = randmio_und_connected(W, ITER); -% -% This function randomizes an undirected network, while preserving the -% degree distribution. The function does not preserve the strength -% distribution in weighted networks. The function also ensures that the -% randomized network maintains connectedness, the ability for every node -% to reach every other node in the network. The input network for this -% function must be connected. 
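A hedged sketch of the additional guarantee stated above, using MATLAB's built-in graph/conncomp (R2015b or later) to confirm connectedness before and after rewiring; the toy graph is made up and should be re-drawn if the first assertion fails:
rng(5);
A = double(rand(30) < 0.25); A = triu(A,1); A = A + A';  % toy binary undirected graph
assert(max(conncomp(graph(A))) == 1);     % input must be connected
[R,eff] = randmio_und_connected(A,5);
assert(max(conncomp(graph(R))) == 1);     % randomized network remains connected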
-% -% Input: W, undirected (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% -% Output: R, randomized network -% eff, number of actual rewirings carried out -% -% References: Maslov and Sneppen (2002) Science 296:910 -% -% -% 2007-2012 -% Mika Rubinov, UNSW -% Jonathan Power, WUSTL -% Olaf Sporns, IU - -% Modification History: -% Jun 2007: Original (Mika Rubinov) -% Apr 2008: Edge c-d is flipped with 50% probability, allowing to explore -% all potential rewirings (Jonathan Power) -% Mar 2012: Limit number of rewiring attempts, count number of successful -% rewirings (Olaf Sporns) - - -n=size(R,1); -[i,j]=find(tril(R)); -K=length(i); -ITER=K*ITER; - -% maximal number of rewiring attempts per 'iter' -maxAttempts= round(n*K/(n*(n-1))); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - rewire=1; - while 1 - e1=ceil(K*rand); - e2=ceil(K*rand); - while (e2==e1) - e2=ceil(K*rand); - end - a=i(e1); b=j(e1); - c=i(e2); d=j(e2); - - if all(a~=[c d]) && all(b~=[c d]) - break %all four vertices must be different - end - end - - if rand>0.5 - i(e2)=d; j(e2)=c; %flip edge c-d with 50% probability - c=i(e2); d=j(e2); %to explore all potential rewirings - end - - %rewiring condition - if ~(R(a,d) || R(c,b)) - %connectedness condition - if ~(R(a,c) || R(b,d)) - P=R([a d],:); - P(1,b)=0; P(2,c)=0; - PN=P; - PN(:,d)=1; PN(:,a)=1; - - while 1 - P(1,:)=any(R(P(1,:)~=0,:),1); - P(2,:)=any(R(P(2,:)~=0,:),1); - P=P.*(~PN); - if ~all(any(P,2)) - rewire=0; - break - elseif any(any(P(:,[b c]))) - break - end - PN=PN+P; - end - end %connectedness testing - - if rewire %reassign edges - R(a,d)=R(a,b); R(a,b)=0; - R(d,a)=R(b,a); R(b,a)=0; - R(c,b)=R(c,d); R(c,d)=0; - R(b,c)=R(d,c); R(d,c)=0; - - j(e1) = d; %reassign edge indices - j(e2) = b; - eff = eff+1; - break; - end %edge reassignment - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/randmio_und_signed.m b/DefaultData/2019_03_03_BCT/randmio_und_signed.m deleted file mode 100755 index 31bd1be..0000000 --- a/DefaultData/2019_03_03_BCT/randmio_und_signed.m +++ /dev/null @@ -1,80 +0,0 @@ -function [R,eff] = randmio_und_signed(W, ITER) -% RANDMIO_UND_SIGNED Random graph with preserved signed degree distribution -% -% R = randmio_und_signed(W,ITER); -% [R,eff] = randmio_und_signed(W,ITER); -% -% This function randomizes an undirected network with positively and -% negatively signed connections, while preserving the positively and -% negatively signed degree distribution. The function does not preserve -% the strength distribution in weighted networks. -% -% Input: W, undirected (binary/weighted) connection matrix -% ITER, rewiring parameter -% (each edge is rewired approximately ITER times) -% -% Output: R, randomized network -% eff, number of actual rewirings carried out -% -% Reference: Maslov and Sneppen (2002) Science 296:910 -% -% -% 2011-2015 -% Dani Bassett, UCSB -% Olaf Sporns, Indiana U -% Mika Rubinov, U Cambridge - -% Modification History: -% Mar 2011: Original (Dani Bassett, based on randmio_und.m) -% Mar 2012: Limit number of rewiring attempts, -% count number of successful rewirings (Olaf Sporns) -% Dec 2015: Rewritten the core of the rewiring algorithm to allow -% unbiased exploration of all network configurations. 
The new -% algorithm allows positive-positive/negative-negative -% rewirings, in addition to the previous positive-positive/0-0 -% and negative-negative/0-0 rewirings (Mika Rubinov). - -if nargin('randperm')==1 - warning('This function requires a recent (>2011) version of MATLAB.') -end - -R = double(W); % sign function requires double input -n = size(R,1); -ITER = ITER*n*(n-1)/2; - -% maximal number of rewiring attempts per 'iter' -maxAttempts = round(n/2); -% actual number of successful rewirings -eff = 0; - -for iter=1:ITER - att=0; - while (att<=maxAttempts) %while not rewired - %select four distinct vertices - nodes = randperm(n,4); - a = nodes(1); - b = nodes(2); - c = nodes(3); - d = nodes(4); - - r0_ab = R(a,b); - r0_cd = R(c,d); - r0_ad = R(a,d); - r0_cb = R(c,b); - - %rewiring condition - if (sign(r0_ab)==sign(r0_cd)) && ... - (sign(r0_ad)==sign(r0_cb)) && ... - (sign(r0_ab)~=sign(r0_ad)) - - R(a,d)=r0_ab; R(a,b)=r0_ad; - R(d,a)=r0_ab; R(b,a)=r0_ad; - R(c,b)=r0_cd; R(c,d)=r0_cb; - R(b,c)=r0_cd; R(d,c)=r0_cb; - - eff = eff+1; - break; - end %rewiring condition - att=att+1; - end %while not rewired -end %iterations \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/randomize_graph_partial_und.m b/DefaultData/2019_03_03_BCT/randomize_graph_partial_und.m deleted file mode 100755 index c8216c0..0000000 --- a/DefaultData/2019_03_03_BCT/randomize_graph_partial_und.m +++ /dev/null @@ -1,53 +0,0 @@ -function A = randomize_graph_partial_und(A,B,maxswap) -% RANDOMIZE_GRAPH_PARTIAL_UND Swap edges with preserved degree sequence -% -% A = RANDOMIZE_GRAPH_PARTIAL_UND(A,B,MAXSWAP) takes adjacency matrices A -% and B and attempts to randomize matrix A by performing MAXSWAP -% rewirings. The rewirings will avoid any spots where matrix B is -% nonzero. -% -% Inputs: A, undirected adjacency matrix -% B, edges to avoid -% MAXSWAP, number of rewirings -% -% Outputs: A, randomized matrix -% -% Richard Betzel, Indiana University, 2013 -% -% Notes: -% 1. Based on the script randmio_und.m. -% 2. Graph may become disconnected as a result of rewiring. Always -% important to check. -% 3. A can be weighted, though the weighted degree sequence will not be -% preserved. -% - -[i,j] = find(triu(A,1)); -m = length(i); -nswap = 0; -while nswap < maxswap - while 1 - e1 = randi(m); e2 = randi(m); - while e2 == e1 - e2 = randi(m); - end - a = i(e1); b = j(e1); - c = i(e2); d = j(e2); - if all(a~=[c,d]) && all(b~=[c,d]) - break - end - end - if rand > 0.5 - i(e2) = d; j(e2) = c; - c = i(e2); d = j(e2); - end - if ~(A(a,d) || A(c,b) || B(a,d) || B(c,b)) - A(a,d) = A(a,b); A(a,b) = 0; - A(d,a) = A(b,a); A(b,a) = 0; - A(c,b) = A(c,d); A(c,d) = 0; - A(b,c) = A(d,c); A(d,c) = 0; - j(e1) = d; - j(e2) = b; - nswap = nswap + 1; - end -end diff --git a/DefaultData/2019_03_03_BCT/randomizer_bin_und.m b/DefaultData/2019_03_03_BCT/randomizer_bin_und.m deleted file mode 100755 index ef8d9f4..0000000 --- a/DefaultData/2019_03_03_BCT/randomizer_bin_und.m +++ /dev/null @@ -1,140 +0,0 @@ -function [R] = randomizer_bin_und(R,alpha) -%RANDOMIZER_BIN_UND Random graph with preserved in/out degree distribution -% -% R = randomizer_bin_und(A,alpha); -% -% This function randomizes a binary undirected network, while preserving -% the degree distribution. The function directly searches for rewirable -% edge pairs (rather than trying to rewire edge pairs at random), and -% hence avoids long loops and works especially well in dense matrices. 
-% -% Inputs: A, binary undirected connection matrix -% alpha, fraction of edges to rewire -% -% Outputs: R, randomized network -% -% References: Maslov and Sneppen (2002) Science 296:910 -% -% -% Jonathan Power, WUSTL. 3/1/10. - -%#ok<*ASGLU> - -% make binary -R=ceil(R); - -% ensure that matrix is binary -if (max(R(:))~=1) || (min(R(:))~=0) - error('Matrix should be binary'); -end - -% ensure that matrix is undirected -if ~isequal(R,R.') - error('Matrix should be undirected'); -end - -% find how many edges are possible in the network -[a,b]=size(R); -numpossibleedges=((a*a)-a)/2; - -% excise the diagonal and replace it with 9999 -savediag=R.*(eye(size(R,1))); -R=R.*(~eye(size(R,1))); -R=R+(eye(size(R,1)).*9999); - -% if there are more edges than non-edges we invert the matrix to reduce -% computation time, then revert at the end of the script -inverted=0; -[i,j]=find(triu(R,1)); -K=size(i,1); -if K>(numpossibleedges/2) - inverted=1; - R=double(~R); - R=R.*(~eye(size(R,1))); - R=R+(eye(size(R,1)).*9999); -end - -% find edges -[i,j]=find(triu(R,1)); -% K=size(i,1); - -% exclude fully connected nodes. will replace later -fullnode=find((sum(triu(R,1),1)+(sum(triu(R,1),2))')==(a-1)); -if ~isempty(fullnode) - R(fullnode,:)=0; R(:,fullnode)=0; - R=R.*(~eye(size(R,1))); - R=R+(eye(size(R,1)).*9999); -end - -% find the edges -[i,j]=find(triu(R,1)); -K=size(i,1); - -if (isempty(K) || K==(numpossibleedges) || (K==numpossibleedges-1)) - fprintf('No possible randomization.\n') -else - for iter=1:K % for every edge - if rand<=alpha % rewire ~alpha% of edges - - % this is the chosen edge - a=i(iter); - b=j(iter); - - % for selected edge, see where each end can connect to - alliholes=find(R(:,i(iter))==0); - alljholes=find(R(:,j(iter))==0); - - % we can only use edges with connection to neither node - iintersect=intersect(alliholes,alljholes); - - % find which of these nodes are connected - [ii,jj]=find(R(iintersect,iintersect)==1); - - % if there an edge to switch - if ~isempty(ii) - - % choose one randomly - nummates=size(ii,1); - mate=ceil(rand*nummates); - - % randomly orient the second edge - if rand<0.5 - c=iintersect(ii(mate)); - d=iintersect(jj(mate)); - else - d=iintersect(ii(mate)); - c=iintersect(jj(mate)); - end - - % make the changes in the matrix - R(a,b)=0; R(c,d)=0; - R(b,a)=0; R(d,c)=0; - R(a,c)=1; R(b,d)=1; - R(c,a)=1; R(d,b)=1; - - % update the edge index - for m=1:K - if ((i(m)==d) && (j(m)==c)) - j(iter)=c; j(m)=b; - elseif ((i(m)==c) && (j(m)==d)) - j(iter)=c; i(m)=b; - end - end - end % rewiring - end % if randtol - inside = 0; - else - inside = 1; - end - end - - % Find nodes inside the box, edges crossing the boundary - - L = find(XYn(:,1)>(randx-deltaL) & XYn(:,1)<(randx+deltaL) ... - & XYn(:,2)>(randy-deltaL) & XYn(:,2)<(randy+deltaL)); - - if ~isempty(L) == 1 - nPartitions = nPartitions+1; - % count edges crossing the boundary of the box - E(nPartitions,1) = sum(sum(A(L,setdiff(1:M,L)))); - % count nodes inside of the box - N(nPartitions,1) = numel(L); - - end - -end - -return; \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/rentian_scaling_3d.m b/DefaultData/2019_03_03_BCT/rentian_scaling_3d.m deleted file mode 100755 index 0016105..0000000 --- a/DefaultData/2019_03_03_BCT/rentian_scaling_3d.m +++ /dev/null @@ -1,209 +0,0 @@ -function [N,E] = rentian_scaling_3d(A,XYZ,n,tol) -% RENTIAN_SCALING_3D Rentian scaling for networks embedded in three dimensions. 
-% -% [N,E] = rentian_scaling_3d(A,XYZ,n,tol) -% -% Physical Rentian scaling (or more simply Rentian scaling) is a property -% of systems that are cost-efficiently embedded into physical space. It is -% what is called a "topo-physical" property because it combines information -% regarding the topological organization of the graph with information -% about the physical placement of connections. Rentian scaling is present -% in very large scale integrated circuits, the C. elegans neuronal network, -% and morphometric and diffusion-based graphs of human anatomical networks. -% Rentian scaling is determined by partitioning the system into cubes, -% counting the number of nodes inside of each cube (N), and the number of -% edges traversing the boundary of each cube (E). If the system displays -% Rentian scaling, these two variables N and E will scale with one another -% in loglog space. The Rent's exponent is given by the slope of log10(E) -% vs. log10(N), and can be reported alone or can be compared to the -% theoretical minimum Rent's exponent to determine how cost efficiently the -% network has been embedded into physical space. Note: if a system displays -% Rentian scaling, it does not automatically mean that the system is -% cost-efficiently embedded (although it does suggest that). Validation -% occurs when comparing to the theoretical minimum Rent's exponent for that -% system. -% -% INPUTS: -% -% A: MxM adjacency matrix. -% Must be unweighted, binary, and symmetric. -% XYZ: Matrix of node placement coordinates. -% Must be in the form of an Mx3 matrix [x y z], where M is -% the number of nodes and x, y, z are column vectors of node -% coordinates. -% n: Number of partitions to compute. Each partition is a data -% point. You want a large enough number to adequately -% estimate the Rent's exponent. -% tol: This should be a small value (for example 1e-6). -% In order to mitigate the effects of boundary conditions due -% to the finite size of the network, we only allow partitions -% that are contained within the boundary of the network. This -% is achieved by first computing the volume of the convex -% hull of the node coordinates (V). We then ensure that the -% volume of the convex hull computed on the original node -% coordinates plus the coordinates of the randomly generated -% partition (Vnew) is within a given tolerance of the -% original (i.e. check abs(V - Vnew) < tol). Thus tol, should -% be a small value in order to make sure the partitions are -% contained largely within the boundary of the network, and -% thus the number of nodes and edges within the box are not -% skewed by finite size effects. -% -% OUTPUTS: -% -% N: nx1 vector of the number of nodes in each of the n partitions. -% E: nx1 vector of the number of edges crossing the boundary of -% each partition. -% -% Subsequent Analysis: -% -% Rentian scaling plots are created by: figure; loglog(E,N,'*'); -% -% To determine the Rent's exponent, p, we need to determine -% the slope of E vs. N in loglog space, which is the Rent's -% exponent. There are many ways of doing this with more or less -% statistical rigor. Robustfit in MATLAB is one such option: -% -% [b,stats] = robustfit(log10(N),log10(E)) -% -% Then the Rent's exponent is b(1,2) and the standard error of the -% estimation is given by stats.se(1,2). -% -% Note: n=5000 was used in Bassett et al. 2010 in PLoS CB. -% -% Reference: -% Danielle S. Bassett, Daniel L. Greenfield, Andreas Meyer-Lindenberg, -% Daniel R. Weinberger, Simon W. Moore, Edward T. Bullmore. 
Efficient -% physical embedding of topologically complex information processing -% networks in brains and computer circuits. PLoS Comput Biol, 2010, -% 6(4):e1000748. -% -% Modification History: -% -% 2010: Original (Dani Bassett) -% Dec 2016: Updated code so that both partition centers and partition -% sizes are chosen at random. Also added in a constraint on -% partition placement that prevents boxes from being located -% outside the edges of the network. This helps prevent skewed -% results due to boundary effects arising from the finite size -% of the network. (Lia Papadopoulos) -% - - -% determine the number of nodes in the system -M = numel(XYZ(:,1)); - -% rescale coordinates so that they are all greater than unity -XYZn = XYZ - repmat(min(XYZ)-1,M,1); - -% compute the area of convex hull (i.e. are of the boundary) of the network -[~,V] = convhull(XYZn(:,1),XYZn(:,2),XYZn(:,3)); - -% min and max network coordinates -xmin = min(XYZn(:,1)); -xmax = max(XYZn(:,1)); -ymin = min(XYZn(:,2)); -ymax = max(XYZn(:,2)); -zmin = min(XYZn(:,3)); -zmax = max(XYZn(:,3)); - -% initialize vectors of number of nodes in box and number of edges crossing -% box -N = zeros(n,1); -E = zeros(n,1); - -% create partitions, and count the number of nodes inside the partition (N) -% and the number of edges traversing the boundary of the partition (E) -nPartitions = 0; - -% create partitions, and count the number of nodes inside the partition (N) -% and the number of edges traversing the boundary of the partition (E) - -while nPartitions<(n+1) - - % variable to check if partition center is within network boundary - % OK if inside == 1 - inside = 0; - - while inside == 0 - - % pick a random (x,y,z) coordinate to be the center of the box - randx = xmin+(xmax-xmin)*rand(1); - randy = ymin+(ymax-ymin)*rand(1); - randz = zmin+(zmax-zmin)*rand(1); - - % make sure the point is inside the convex hull of the network - newCoords = [XYZn; [randx randy randz]]; - [~,Vnew] = convhull(newCoords(:,1),newCoords(:,2),newCoords(:,3)); - - % if the old convex hull area and new convex hull area are equal - % then the box center must be inside the network boundary. - - if isequal(V,Vnew)==0 - inside = 0; - else - inside = 1; - end - - end - - % determine the approximate maximum distance the box can extend, given - % the center point and the bounds of the network - deltaX = min(abs(xmax-randx),abs(xmin-randx)); - deltaY = min(abs(ymax-randy),abs(ymin-randy)); - deltaZ = min(abs(zmax-randz),abs(zmin-randz)); - deltaLmin = min([deltaX deltaY deltaZ]); - - % variable to check if partition is within network boundary - % OK if inside == 1 - inside = 0; - - while inside == 0 - - % pick a random (side length)/2 that is between 0 and the - % max possible - deltaL = deltaLmin*rand(1); - - % (x,y,z) coordinates for corners of box - boxCoords = [randx - deltaL randy - deltaL randz - deltaL; ... - randx - deltaL randy - deltaL randz + deltaL; ... - randx - deltaL randy + deltaL randz - deltaL; ... - randx - deltaL randy + deltaL randz + deltaL; ... - randx + deltaL randy - deltaL randz - deltaL;... - randx + deltaL randy - deltaL randz + deltaL; ... - randx + deltaL randy + deltaL randz - deltaL; ... 
- randx + deltaL randy + deltaL randz + deltaL]; - - % check if all corners of box are inside the convex hull of the - % network - newCoords = [XYZn; boxCoords]; - [~,Vnew] = convhull(newCoords(:,1),newCoords(:,2),newCoords(:,3)); - - % make sure the new convex hull that includes the partition corners - % is within a certain tolerance of the original convex hull area. - - if abs(V-Vnew)>tol - inside = 0; - else - inside = 1; - end - end - - % Find nodes inside the box, edges crossing the boundary - - L = find(XYZn(:,1)>(randx-deltaL) & XYZn(:,1)<(randx+deltaL) ... - & XYZn(:,2)>(randy-deltaL) & XYZn(:,2)<(randy+deltaL) ... - & XYZn(:,3)>(randz-deltaL) & XYZn(:,3)<(randz+deltaL)); - - if ~isempty(L) == 1 - nPartitions = nPartitions+1; - % count edges crossing the boundary of the cube - E(nPartitions,1) = sum(sum(A(L,setdiff(1:M,L)))); - % count nodes inside of the cube - N(nPartitions,1) = numel(L); - - end - -end - -return; \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/reorderMAT.m b/DefaultData/2019_03_03_BCT/reorderMAT.m deleted file mode 100755 index 8601e40..0000000 --- a/DefaultData/2019_03_03_BCT/reorderMAT.m +++ /dev/null @@ -1,64 +0,0 @@ -function [MATreordered,MATindices,MATcost] = reorderMAT(MAT,H,cost) -%REORDERMAT Reorder matrix for visualization -% -% [MATreordered,MATindices,MATcost] = reorderMAT(MAT,H,cost); -% -% This function reorders the connectivity matrix in order to place more -% edges closer to the diagonal. This often helps in displaying community -% structure, clusters, etc. -% -% Inputs: MAT, connection matrix -% H, number of reordering attempts -% cost, 'line' or 'circ', for shape of lattice -% (linear or ring lattice) -% -% MATreordered reordered connection matrix -% MATindices reordered indices -% MATcost cost of reordered matrix -% -% -% Olaf Sporns, Indiana University - - -N = length(MAT); -diagMAT = diag(diag(MAT)); -MAT = MAT-diagMAT; - -% generate cost function -if strcmp(cost,'line') - profil = fliplr(normpdf(1:N,0,N/2)); -end; -if strcmp(cost,'circ') - profil = fliplr(normpdf(1:N,N/2,N/4)); -end; -COST = toeplitz(profil,profil); - -% initialize lowCOST -lowMATcost = sum(sum(COST.*MAT)); - -% keep track of starting configuration -MATstart = MAT; -starta = 1:N; - -% reorder -for h=1:H - a = 1:N; - % choose two positions at random and flip them - r = randperm(N); - a(r(1)) = r(2); - a(r(2)) = r(1); - MATcostnew = sum(sum(MAT(a,a).*COST)); - if (MATcostnew < lowMATcost) - MAT = MAT(a,a); - r2 = starta(r(2)); - r1 = starta(r(1)); - starta(r(1)) = r2; - starta(r(2)) = r1; - lowMATcost = MATcostnew; - end; -end; % h - -MATreordered = MATstart(starta,starta) + diagMAT(starta,starta); -MATindices = starta; -MATcost = lowMATcost; - diff --git a/DefaultData/2019_03_03_BCT/reorder_matrix.m b/DefaultData/2019_03_03_BCT/reorder_matrix.m deleted file mode 100755 index 8bab68b..0000000 --- a/DefaultData/2019_03_03_BCT/reorder_matrix.m +++ /dev/null @@ -1,105 +0,0 @@ -function [Mreordered,Mindices,cost] = reorder_matrix(M1,cost,flag) -% REORDER_MATRIX Matrix reordering for visualization -% -% [Mreordered,Mindices,cost] = reorder_matrix(M1,cost,flag) -% -% This function rearranges the nodes in matrix M1 such that the matrix -% elements are squeezed along the main diagonal. The function uses a -% version of simulated annealing. 
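A minimal sketch of the simulated-annealing acceptance rule this function relies on (the same test appears in its loop below): a worse ordering is still accepted with probability exp(-(costnew-lowcost)/T), so early high-temperature steps explore and late low-temperature steps refine; the numbers here are made up:
lowcost = 0.40;                 % normalized cost of the current ordering
costnew = 0.42;                 % cost after a proposed swap (worse here)
T = 1e-03;                      % current temperature, T = T0*Texp^h
accept = (costnew < lowcost) || (rand < exp(-(costnew - lowcost)/T));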
-% -% Inputs: M1 = connection matrix (weighted or binary, -% directed or undirected) -% cost = 'line' or 'circ', for shape of lattice -% cost (linear or ring lattice) -% -% Mreordered = reordered connection matrix -% Mindices = reordered indices -% cost = distance between M1 and Mreordered -% -% Note that in general, the outcome will depend on the initial condition -% (the setting of the random number seed). Also, there is no good way to -% determine optimal annealing parameters in advance - these paramters -% will need to be adjusted "by hand" (particularly H, Texp, and T0). -% For large and/or dense matrices, it is highly recommended to perform -% exploratory runs varying the settings of 'H' and 'Texp' and then select -% the best values. -% -% Based on extensive testing, it appears that T0 and Hbrk can remain -% unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for -% example. H is the most important parameter - set to larger values as -% the problem size increases. It is advisable to run this function -% multiple times and select the solution(s) with the lowest 'cost'. -% -% Setting 'Texp' to zero cancels annealing and uses a greedy algorithm -% instead. -% -% Yusuke Adachi, University of Tokyo 2010 -% Olaf Sporns, Indiana University 2010 - -N = size(M1,1); - -% generate cost function -if (strcmp(cost,'line')) - profil = fliplr(normpdf(1:N,0,N/2)); -end; -if (strcmp(cost,'circ')) - profil = fliplr(normpdf(1:N,N/2,N/4)); -end; -COST = (toeplitz(profil,profil).*~eye(N)); -COST = COST./sum(sum(COST)); - -% establish maxcost, lowcost, mincost -maxcost = sum(sort(COST(:)).*(sort(M1(:)))); -lowcost = sum(sum(M1.*COST))/maxcost; -mincost = lowcost; - -% initialize -anew = 1:N; -amin = 1:N; -h = 0; hcnt = 0; - -% set annealing parameters -% H determines the maximal number of steps -% Texp determines the steepness of the temperature gradient -% T0 sets the initial temperature (and scales the energy term) -% Hbrk sets a break point for the simulation (if no further improvement) -H = 1e04; Texp = 1-10/H; T0 = 1e-03; Hbrk = H/10; -%Texp = 0; - -while hHbrk) - break; - end; - % current temperature - T = T0*Texp^h; - % choose two positions at random and flip them - atmp = anew; - %r = randperm(N); % slower - r = ceil(rand(1,2).*N); - atmp(r(1)) = anew(r(2)); - atmp(r(2)) = anew(r(1)); - costnew = sum(sum(M1(atmp,atmp).*COST))/maxcost; - % annealing - if (costnew < lowcost) || (rand < exp(-(costnew-lowcost)/T)) - anew = atmp; - lowcost = costnew; - % is this a new absolute best? 
- if (lowcost0)); -% Gprob_SPL = mean(rob_SPL(~eye(N)>0)); -% -% -% Reference: Goñi J, et al (2013) PLoS ONE -% -% -% Joaquin Goñi, IU Bloomington, 2012 - - -N = size(adj,1); -EYE = logical(eye(N,N)); - -flagResources = ~isnan(lambda); - -if flagResources - if lambda<=0 || lambda>=1 - error('p_req_values must be non-zero probabilities') - end - z = zeros(N,N); -end -if nargin<4 - SPL = distance_wei_floyd(adj); -end -if nargin<5 - M = diag(sum(adj,2))\adj; -end - -Lvalues = unique(SPL(:)); -Lvalues = Lvalues(~(Lvalues==0)); - -prob_SPL = zeros(N,N); % a priori zero probability of going through SPL among nodes - -for indexSPL=1:length(Lvalues) %for each possible value of SPL for current component - SPLvalue = Lvalues(indexSPL); - [~, hcols] = find(SPL==SPLvalue); - hvector = unique(hcols); clear hrows hcols - entries = SPL==SPLvalue; - - if flagResources % compute Eres - [prob_aux,z_aux] = prob_first_particle_arrival(M,SPLvalue,hvector,lambda); - else % not compute Eres - [prob_aux] = prob_first_particle_arrival(M,SPLvalue,hvector,[]); - end - - prob_aux(~entries) = 0; - prob_SPL = prob_SPL + prob_aux; - - if flagResources - z_aux(~entries) = 0; - z = z + z_aux; - end -end - -prob_SPL(EYE) = 0; - -if flagResources - z(prob_SPL==1) = 1; - Eres = 1./z; - Eres(EYE) = 0; -else - Eres = nan; -end - -function [prob,resources] = prob_first_particle_arrival(M,L,hvector,lambda) - -N = size(M,1); -prob = zeros(N,N); - -if nargin<4 - hvector=1:N; -end - -flagResources = ~isnan(lambda); - -if flagResources - if lambda<=0 || lambda>=1 - error('p_req_values must be non-zero probabilities') - end - resources = zeros(N,N); -end - -for hindex=1:length(hvector) %for each destination node h - h = hvector(hindex); - B_h = M; - B_h(h,:) = 0; B_h(h,h) = 1; % h becomes absorbant state. - - B_h_L = B_h^L; - - term = 1-B_h_L(:,h); - - prob(:,h)= 1-term; - - if flagResources - resources(:,h) = repmat(log(1-lambda),N,1)./repmat(log(term),1); - end -end diff --git a/DefaultData/2019_03_03_BCT/retrieve_shortest_path.m b/DefaultData/2019_03_03_BCT/retrieve_shortest_path.m deleted file mode 100755 index 1213597..0000000 --- a/DefaultData/2019_03_03_BCT/retrieve_shortest_path.m +++ /dev/null @@ -1,37 +0,0 @@ -function path = retrieve_shortest_path(s,t,hops,Pmat) -% RETRIEVE_SHORTEST_PATH Retrieval of shortest path -% -% This function finds the sequence of nodes that comprise the shortest -% path between a given source and target node. -% -% Inputs: -% s, -% Source node: i.e. node where the shortest path begins. -% t, -% Target node: i.e. node where the shortest path ends. -% hops, -% Number of edges in the path. This matrix may be obtained as the -% second output argument of the function "distance_wei_floyd.m". -% Pmat, -% Pmat is a matrix whose elements {k,t} indicate the next node in -% the shortest path between k and t. This matrix may be obtained -% as the third output of the function "distance_wei_floyd.m" -% -% Output: -% path, -% Nodes comprising the shortest path between nodes s and t. 
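For reference, a hedged sketch of the intended calling pattern, with hops and Pmat taken from distance_wei_floyd.m as described above; the toy matrix, seed, and node pair are arbitrary:
rng(6);
W = rand(15); W = (W + W')/2; W(1:16:end) = 0; W(W < 0.4) = 0;   % toy weight matrix
[SPL,hops,Pmat] = distance_wei_floyd(W,'inv');   % lengths l_ij = 1/w_ij
path = retrieve_shortest_path(3,10,hops,Pmat);   % node sequence from 3 to 10 ([] if unreachable)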
-% -% Andrea Avena-Koenigsberger and Joaquin Goñi, IU, 2012 - -path_length = hops(s,t); -if path_length ~= 0 - path = nan(path_length+1,1); - path(1) = s; - for ind = 2:length(path) - s = Pmat(s,t); - path(ind) = s; - end -else - path = []; -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/rich_club_bd.m b/DefaultData/2019_03_03_BCT/rich_club_bd.m deleted file mode 100755 index 80af3fc..0000000 --- a/DefaultData/2019_03_03_BCT/rich_club_bd.m +++ /dev/null @@ -1,53 +0,0 @@ -function [R,Nk,Ek] = rich_club_bd(CIJ,varargin) -%RICH_CLUB_BD Rich club coefficients (binary directed graph) -% -% R = rich_club_bd(CIJ) -% [R,Nk,Ek] = rich_club_bd(CIJ,klevel) -% -% The rich club coefficient, R, at level k is the fraction of edges that -% connect nodes of degree k or higher out of the maximum number of edges -% that such nodes might share. -% -% Input: CIJ, connection matrix, binary and directed -% klevel, optional input argument. klevel sets the -% maximum level at which the rich club -% coefficient will be calculated. If klevel is -% not included, the maximum level will be -% set to the maximum degree of CIJ. -% -% Output: R, vector of rich-club coefficients for levels -% 1 to klevel. -% Nk, number of nodes with degree>k -% Ek, number of edges remaining in subgraph with -% degree>k -% -% Reference: Colizza et al. (2006) Nat. Phys. 2:110. -% -% Martijn van den Heuvel, University Medical Center Utrecht, 2011 - -N = size(CIJ,1); %#ok - -% definition of "degree" as used for RC coefficients -% degree is taken to be the sum of incoming and outgoing connections -[~,~,degree] = degrees_dir(CIJ); - -if nargin == 1 - klevel = max(degree); -elseif nargin == 2 - klevel = varargin{1}; -elseif nargin > 2 - error('number of inputs incorrect. Should be [CIJ], or [CIJ, klevel]') -end - -R = zeros(1,klevel); -Nk = zeros(1,klevel); -Ek = zeros(1,klevel); -for k = 1:klevel - SmallNodes=find(degree<=k); %get 'small nodes' with degree <=k - subCIJ=CIJ; %extract subnetwork of nodes >k by removing nodes <=k of CIJ - subCIJ(SmallNodes,:)=[]; %remove rows - subCIJ(:,SmallNodes)=[]; %remove columns - Nk(k)=size(subCIJ,2); %number of nodes with degree >k - Ek(k)=sum(subCIJ(:)); %total number of connections in subgraph - R(k)=Ek(k)/(Nk(k)*(Nk(k)-1)); %unweighted rich-club coefficient -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/rich_club_bu.m b/DefaultData/2019_03_03_BCT/rich_club_bu.m deleted file mode 100755 index e7e7651..0000000 --- a/DefaultData/2019_03_03_BCT/rich_club_bu.m +++ /dev/null @@ -1,49 +0,0 @@ -function [R,Nk,Ek] = rich_club_bu(CIJ,varargin) -%RICH_CLUB_BU Rich club coefficients (binary undirected graph) -% -% R = rich_club_bu(CIJ) -% [R,Nk,Ek] = rich_club_bu(CIJ,klevel) -% -% The rich club coefficient, R, at level k is the fraction of edges that -% connect nodes of degree k or higher out of the maximum number of edges -% that such nodes might share. -% -% Input: CIJ, connection matrix, binary and undirected -% klevel, optional input argument. klevel sets the -% maximum level at which the rich club -% coefficient will be calculated. If klevel is -% not included, the maximum level will be -% set to the maximum degree of CIJ. -% -% Output: R, vector of rich-club coefficients for levels -% 1 to klevel. -% Nk, number of nodes with degree>k -% Ek, number of edges remaining in subgraph with -% degree>k -% -% Reference: Colizza et al. (2006) Nat. Phys. 2:110. 
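A hedged sketch of a common follow-up analysis (not prescribed by the function itself): normalize the empirical curve by degree-preserving surrogates built with randmio_und.m from this same diff; values of Rnorm above 1 are usually read as rich-club organization. The graph size and surrogate count are arbitrary:
rng(7);
A = double(rand(60) < 0.15); A = triu(A,1); A = A + A';  % toy binary undirected graph
R = rich_club_bu(A);                          % empirical rich-club curve
Rrand = zeros(10,numel(R));
for it = 1:10                                 % small illustrative surrogate ensemble
    Rrand(it,:) = rich_club_bu(randmio_und(A,5),numel(R));
end
Rnorm = R ./ mean(Rrand,1);                   % top k levels may be NaN when Nk < 2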
-% -% Martijn van den Heuvel, University Medical Center Utrecht, 2011 - -Degree = sum(CIJ); %compute degree of each node - -if nargin == 1 - klevel = max(Degree); -elseif nargin == 2 - klevel = varargin{1}; -elseif nargin > 2 - error('number of inputs incorrect. Should be [CIJ], or [CIJ, klevel]') -end - -R = zeros(1,klevel); -Nk = zeros(1,klevel); -Ek = zeros(1,klevel); -for k = 1:klevel - SmallNodes=find(Degree<=k); %get 'small nodes' with degree <=k - subCIJ=CIJ; %extract subnetwork of nodes >k by removing nodes <=k of CIJ - subCIJ(SmallNodes,:)=[]; %remove rows - subCIJ(:,SmallNodes)=[]; %remove columns - Nk(k)=size(subCIJ,2); %number of nodes with degree >k - Ek(k)=sum(subCIJ(:)); %total number of connections in subgraph - R(k)=Ek(k)/(Nk(k)*(Nk(k)-1)); %unweighted rich-club coefficient -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/rich_club_wd.m b/DefaultData/2019_03_03_BCT/rich_club_wd.m deleted file mode 100755 index 679c932..0000000 --- a/DefaultData/2019_03_03_BCT/rich_club_wd.m +++ /dev/null @@ -1,75 +0,0 @@ -function [Rw] = rich_club_wd(CIJ,varargin) -%RICH_CLUB_WD Rich club coefficients curve (weighted directed graph) -% -% Rw = rich_club_wd(CIJ,varargin) -% -% The weighted rich club coefficient, Rw, at level k is the fraction of -% edge weights that connect nodes of degree k or higher out of the -% maximum edge weights that such nodes might share. -% -% Inputs: -% CIJ: weighted directed connection matrix -% -% k-level: (optional) max level of RC(k). -% (by default k-level quals the maximal degree of CIJ) -% -% Output: -% Rw: rich-club curve -% -% -% References: -% T Opsahl et al. Phys Rev Lett, 2008, 101(16) -% M van den Heuvel, O Sporns, J Neurosci 2011 31(44) -% -% Martijn van den Heuvel, University Medical Center Utrecht, 2011 - -% Modification History: -% 2011: Original -% 2015: Expanded documentation (Mika Rubinov) - - -NofNodes = size(CIJ,2); %#ok %number of nodes -NodeDegree = sum((CIJ~=0))+sum((CIJ'~=0)); %define degree of each node (indegree + outdegree) - -%define to which level rc should be computed -if size(varargin,2)==1 - klevel = varargin{1}; -elseif isempty(varargin) - klevel = max(NodeDegree); -else - error('number of inputs incorrect. Should be [CIJ], or [CIJ, klevel]') -end - - -%wrank contains the ranked weights of the network, with strongest connections on top - -wrank = sort(CIJ(:), 'descend'); - -%loop over all possible k-levels -for kk = 1:klevel - - SmallNodes=find(NodeDegree - continue - end - - %remove small nodes with NodeDegreer - Wr = sum(CutoutCIJ(:)); - - %total number of connections in subset E>r - Er = length(find(CutoutCIJ~=0)); - - %E>r number of connections with max weight in network - wrank_r = wrank(1:1:Er); - - %weighted rich-club coefficient - Rw(kk)=Wr / sum(wrank_r); - -end diff --git a/DefaultData/2019_03_03_BCT/rich_club_wu.m b/DefaultData/2019_03_03_BCT/rich_club_wu.m deleted file mode 100755 index c6523af..0000000 --- a/DefaultData/2019_03_03_BCT/rich_club_wu.m +++ /dev/null @@ -1,72 +0,0 @@ -function [Rw] = rich_club_wu(CIJ,varargin) -%RICH_CLUB_WU Rich club coefficients curve (weighted undirected graph) -% -% Rw = rich_club_wu(CIJ,varargin) % rich club curve for weighted graph -% -% The weighted rich club coefficient, Rw, at level k is the fraction of -% edge weights that connect nodes of degree k or higher out of the -% maximum edge weights that such nodes might share. -% -% Inputs: -% CIJ: weighted directed connection matrix -% -% k-level: (optional) max level of RC(k). 
-% (by default k-level quals the maximal degree of CIJ) -% -% Output: -% Rw: rich-club curve -% -% -% References: -% T Opsahl et al. Phys Rev Lett, 2008, 101(16) -% M van den Heuvel, O Sporns, J Neurosci 2011 31(44) -% -% Martijn van den Heuvel, University Medical Center Utrecht, 2011 - -% Modification History: -% 2011: Original -% 2015: Expanded documentation (Mika Rubinov) - - -NofNodes = size(CIJ,2); %#ok %number of nodes -NodeDegree = sum((CIJ~=0)); %define degree of each node - -%define to which level rc should be computed -if size(varargin,2)==1 - klevel = varargin{1}; -elseif isempty(varargin) - klevel = max(NodeDegree); -else - error('number of inputs incorrect. Should be [CIJ], or [CIJ, klevel]') -end - -%wrank contains the ranked weights of the network, with strongest connections on top -wrank = sort(CIJ(:), 'descend'); - -%loop over all possible k-levels -for kk = 1:klevel - - SmallNodes=find(NodeDegree - continue - end - - %remove small nodes with NodeDegreer - Wr = sum(CutoutCIJ(:)); - - %total number of connections in subset E>r - Er = length(find(CutoutCIJ~=0)); - - %E>r number of connections with max weight in network - wrank_r = wrank(1:1:Er); - - %weighted rich-club coefficient - Rw(kk)=Wr / sum(wrank_r); -end diff --git a/DefaultData/2019_03_03_BCT/rout_efficiency.m b/DefaultData/2019_03_03_BCT/rout_efficiency.m deleted file mode 100755 index 1ad04bd..0000000 --- a/DefaultData/2019_03_03_BCT/rout_efficiency.m +++ /dev/null @@ -1,87 +0,0 @@ -function [GErout,Erout,Eloc] = rout_efficiency(D,transform) -% ROUT_EFFICIENCY Mean, pair-wise and local routing efficiency -% -% [GErout,Erout,Eloc] = rout_efficiency(D,transform); -% -% The routing efficiency is the average of inverse shortest path length. -% -% The local routing efficiency of a node u is the routing efficiency -% computed on the subgraph formed by the neighborhood of node u -% (excluding node u). -% -% -% Inputs: -% -% D, -% Weighted/unweighted directed/undirected -% connection *weight* OR *length* matrix. -% -% transform, -% If the input matrix is a connection *weight* matrix, specify a -% transform that map input connection weights to connection -% lengths. Two transforms are available. -% 'log' -> l_ij = -log(w_ij) -% 'inv' -> l_ij = 1/w_ij -% -% If the input matrix is a connection *length* matrix, do not -% specify a transform (or specify an empty transform argument). -% -% -% Outputs: -% -% GErout, -% Mean global routing efficiency (scalar). -% -% Erout, -% Pair-wise routing efficiency (matrix). -% -% Eloc, -% Local efficiency (vector) -% -% -% Note: -% -% The input matrix may be either a connection weight matrix, or a -% connection length matrix. The connection length matrix is typically -% obtained with a mapping from weight to length, such that higher -% weights are mapped to shorter lengths (see above). 
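A hedged usage sketch on a made-up weighted undirected matrix, passing the 'inv' transform so that W is interpreted as connection weights rather than lengths:
rng(8);
W = rand(25); W = (W + W')/2; W(1:26:end) = 0; W(W < 0.3) = 0;   % toy weight matrix
[GErout,Erout,Eloc] = rout_efficiency(W,'inv');  % global scalar, pairwise matrix, local vector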
-% -% -% Algorithm: Floyd–Warshall Algorithm -% -% -% References: -% Latora and Marchiori (2001) Phys Rev Lett -% Goñi et al (2013) PLoS ONE -% Avena-Koenigsberger et al (2016) Brain Structure and Function -% -% -% Andrea Avena-Koenigsberger and Joaquin Goñi, IU Bloomington, 2012 -% - -% Modification history -% 2012 - original -% 2016 - included computation of local efficiency -% 2016 - included transform variable that maps strengths onto distances - - -if ~exist('transform','var') - transform = []; -end - -n=length(D); % number of nodes - -Erout = distance_wei_floyd(D,transform); % pair-wise routing efficiency -Erout = 1./Erout; -Erout(eye(n)>0) = 0; -GErout = sum(Erout(~eye(n)>0))/(n^2-n); % global routing efficiency - -if nargout == 3 - Eloc = zeros(n,1); - for u = 1:n - Gu = find(D(u,:) | D(:,u).'); % u's neighbors - nGu = length(Gu); - e = distance_wei_floyd(D(Gu,Gu),transform); - Eloc(u) = sum(sum(1./e(~eye(nGu)>0)))/nGu; % efficiency of subgraph Gu - end -end diff --git a/DefaultData/2019_03_03_BCT/score_wu.m b/DefaultData/2019_03_03_BCT/score_wu.m deleted file mode 100755 index 08b926c..0000000 --- a/DefaultData/2019_03_03_BCT/score_wu.m +++ /dev/null @@ -1,40 +0,0 @@ -function [CIJscore,sn] = score_wu(CIJ,s) -%SCORE_WU S-core -% -% [CIJscore,sn] = score_wu(CIJ,s); -% -% The s-core is the largest subnetwork comprising nodes of strength at -% least s. This function computes the s-core for a given weighted -% undirected connection matrix. Computation is analogous to the more -% widely used k-core, but is based on node strengths instead of node -% degrees. -% -% input: CIJ, connection/adjacency matrix (weighted, undirected) -% s, level of s-core. Note: s can take on any fractional value -% -% output: CIJscore, connection matrix of the s-core. This matrix -% contains only nodes with a strength of at least s. -% sn, size of s-core -% -% Olaf Sporns, Indiana University, 2007/2008/2010/2012 - -while 1 - - % get strengths of matrix - [str] = strengths_und(CIJ); - - % find nodes with strength <s - ff = find((str<s)&(str>0)); - - % if none found -> stop - if (isempty(ff)) break; end; %#ok - - % peel found nodes - CIJ(ff,:) = 0; - CIJ(:,ff) = 0; - -end; - -CIJscore = CIJ; -sn = sum(str>0); - diff --git a/DefaultData/2019_03_03_BCT/search_information.m b/DefaultData/2019_03_03_BCT/search_information.m deleted file mode 100755 index 7791473..0000000 --- a/DefaultData/2019_03_03_BCT/search_information.m +++ /dev/null @@ -1,117 +0,0 @@ -function SI = search_information(W, L, has_memory) -% SEARCH_INFORMATION Search information -% -% SI = search_information(W, L, has_memory) -% -% Computes the amount of information (measured in bits) that a random -% walker needs to follow the shortest path between a given pair of nodes. -% -% Inputs: -% -% W -% Weighted/unweighted directed/undirected -% connection weight matrix. -% -% L -% Weighted/unweighted directed/undirected -% connection length matrix. -% -% has_memory, -% This flag defines whether or not the random walker "remembers" -% its previous step, which has the effect of reducing the amount -% of information needed to find the next state. If this flag is -% not set, the walker has no memory by default. -% -% -% Outputs: -% -% SI, -% pair-wise search information (matrix). Note that SI(i,j) may be -% different from SI(j,i); hence, SI is not a symmetric matrix -% even when adj is symmetric. -% -% -% References: Rosvall et al. 
(2005) Phys Rev Lett 94, 028701 -% Goñi et al (2014) PNAS doi: 10.1073/pnas.131552911 -% -% -% Andrea Avena-Koenigsberger and Joaquin Goñi, IU Bloomington, 2014 -% Caio Seguin, University of Melbourne, 2019 - -% Modification history -% 2014 - original -% 2016 - included SPL transform option and generalized for -% symmetric/asymmetric networks -% 2019 - modified to make user directly specify weight-to-length transformations - - -if ~exist('has_memory','var') - has_memory = false; -end - -N = size(W,1); - -if issymmetric(W) - flag_triu = true; -else - flag_triu = false; -end - -T = diag(sum(W,2))\W; -[~,hops,Pmat] = distance_wei_floyd(L,[]); % Compute shortest paths based on L - -SI = zeros(N,N); -SI(eye(N)>0) = nan; - -for i = 1:N - for j = 1:N - if (j > i && flag_triu) || (~flag_triu && i ~= j) - path = retrieve_shortest_path(i,j,hops,Pmat); - lp = length(path); - if flag_triu - if ~isempty(path) - pr_step_ff = nan(1,lp-1); - pr_step_bk = nan(1,lp-1); - if has_memory - pr_step_ff(1) = T(path(1),path(2)); - pr_step_bk(lp-1) = T(path(lp),path(lp-1)); - for z=2:lp-1 - pr_step_ff(z) = T(path(z),path(z+1))/(1 - T(path(z-1),path(z))); - pr_step_bk(lp-z) = T(path(lp-z+1),path(lp-z))/(1 - T(path(lp-z+2),path(lp-z+1))); - end - else - for z=1:length(path)-1 - pr_step_ff(z) = T(path(z),path(z+1)); - pr_step_bk(z) = T(path(z+1),path(z)); - end - end - prob_sp_ff = prod(pr_step_ff); - prob_sp_bk = prod(pr_step_bk); - SI(i,j) = -log2(prob_sp_ff); - SI(j,i) = -log2(prob_sp_bk); - else - SI(i,j) = inf; - SI(j,i) = inf; - end - else - if ~isempty(path) - pr_step_ff = nan(1,lp-1); - if has_memory - pr_step_ff(1) = T(path(1),path(2)); - for z=2:lp-1 - pr_step_ff(z) = T(path(z),path(z+1))/(1 - T(path(z-1),path(z))); - end - else - for z=1:length(path)-1 - pr_step_ff(z) = T(path(z),path(z+1)); - end - end - prob_sp_ff = prod(pr_step_ff); - SI(i,j) = -log2(prob_sp_ff); - else - SI(i,j) = inf; - end - end - end - end -end diff --git a/DefaultData/2019_03_03_BCT/strengths_dir.m b/DefaultData/2019_03_03_BCT/strengths_dir.m deleted file mode 100755 index ac4726e..0000000 --- a/DefaultData/2019_03_03_BCT/strengths_dir.m +++ /dev/null @@ -1,27 +0,0 @@ -function [is,os,str] = strengths_dir(CIJ) -%STRENGTHS_DIR In-strength and out-strength -% -% [is,os,str] = strengths_dir(CIJ); -% -% Node strength is the sum of weights of links connected to the node. The -% instrength is the sum of inward link weights and the outstrength is the -% sum of outward link weights. -% -% Input: CIJ, directed weighted connection matrix -% -% Output: is, node instrength -% os, node outstrength -% str, node strength (instrength + outstrength) -% -% Notes: Inputs are assumed to be on the columns of the CIJ matrix. -% -% -% Olaf Sporns, Indiana University, 2002/2006/2008 - - -% compute strengths -is = sum(CIJ,1); % instrength = column sum of CIJ -os = sum(CIJ,2)'; % outstrength = row sum of CIJ -str = is+os; % strength = instrength+outstrength - - diff --git a/DefaultData/2019_03_03_BCT/strengths_und.m b/DefaultData/2019_03_03_BCT/strengths_und.m deleted file mode 100755 index 4eb844a..0000000 --- a/DefaultData/2019_03_03_BCT/strengths_und.m +++ /dev/null @@ -1,18 +0,0 @@ -function [str] = strengths_und(CIJ) -%STRENGTHS_UND Strength -% -% str = strengths_und(CIJ); -% -% Node strength is the sum of weights of links connected to the node. 
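% Illustrative sketch (not part of the deleted files): node strengths are plain
% row/column sums, so the toy example below reproduces the outputs documented
% here (assuming strengths_dir and strengths_und are on the path).
W = rand(5).*~eye(5);                  % toy weighted directed network, no self-connections
in_str  = sum(W,1);                    % in-strength  = column sums
out_str = sum(W,2)';                   % out-strength = row sums
[is,os,str] = strengths_dir(W);        % should equal in_str, out_str and in_str+out_str
str_und = strengths_und((W+W')/2);     % strength of the symmetrized (undirected) network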
-% -% Input: CIJ, undirected weighted connection matrix -% -% Output: str, node strength -% -% -% Olaf Sporns, Indiana University, 2002/2006/2008 - -% compute strengths -str = sum(CIJ); % strength - - diff --git a/DefaultData/2019_03_03_BCT/strengths_und_sign.m b/DefaultData/2019_03_03_BCT/strengths_und_sign.m deleted file mode 100755 index 9d3d66a..0000000 --- a/DefaultData/2019_03_03_BCT/strengths_und_sign.m +++ /dev/null @@ -1,30 +0,0 @@ -function [Spos,Sneg,vpos,vneg] = strengths_und_sign(W) -%STRENGTHS_UND_SIGN Strength and weight -% -% [Spos Sneg] = strengths_und_sign(W); -% [Spos Sneg vpos vneg] = strengths_und_sign(W); -% -% Node strength is the sum of weights of links connected to the node. -% -% Inputs: W, undirected connection matrix with positive -% and negative weights -% -% Output: Spos/Sneg, nodal strength of positive/negative weights -% vpos/vneg, total positive/negative weight -% -% -% 2011, Mika Rubinov, UNSW - -% Modification History: -% Mar 2011: Original - - -n = length(W); %number of nodes -W(1:n+1:end) = 0; %clear diagonal -Spos = sum( W.*(W>0)); %positive strengths -Sneg = sum(-W.*(W<0)); %negative strengths - -if nargout>2 - vpos = sum(Spos); %positive weight - vneg = sum(Sneg); %negative weight -end \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/subgraph_centrality.m b/DefaultData/2019_03_03_BCT/subgraph_centrality.m deleted file mode 100755 index f753098..0000000 --- a/DefaultData/2019_03_03_BCT/subgraph_centrality.m +++ /dev/null @@ -1,24 +0,0 @@ -function Cs = subgraph_centrality(CIJ) -% SUBGRAPH_CENTRALITY Subgraph centrality of a network -% -% Cs = subgraph_centrality(CIJ) -% -% The subgraph centrality of a node is a weighted sum of closed walks of -% different lengths in the network starting and ending at the node. This -% function returns a vector of subgraph centralities for each node of the -% network. -% -% Inputs: CIJ, adjacency matrix (binary) -% -% Outputs: Cs, subgraph centrality -% -% Reference: Estrada and Rodriguez-Velasquez (2005) Phys Rev E 71, 056103 -% Estrada and Higham (2010) SIAM Rev 52, 696. -% -% Xi-Nian Zuo, Chinese Academy of Sciences, 2010 -% Rick Betzel, Indiana University, 2012 - -[V,lambda] = eig(CIJ); % Compute the eigenvectors and -lambda = diag(lambda); % eigenvalues. -V2 = V.^2; % Matrix of squares of the eigenvectors elements. -Cs = real(V2 * exp(lambda)); % Compute eigenvector centrality. Lop off imaginary part remaining due to precision error. \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/threshold_absolute.m b/DefaultData/2019_03_03_BCT/threshold_absolute.m deleted file mode 100755 index e95bc06..0000000 --- a/DefaultData/2019_03_03_BCT/threshold_absolute.m +++ /dev/null @@ -1,19 +0,0 @@ -function W = threshold_absolute(W, thr) -% THRESHOLD_ABSOLUTE Absolute thresholding -% -% W_thr = threshold_absolute(W, thr); -% -% This function thresholds the connectivity matrix by absolute weight -% magnitude. All weights below the given threshold, and all weights -% on the main diagonal (self-self connections) are set to 0. -% -% Inputs: W weighted or binary connectivity matrix -% thr weight treshold -% -% Output: W_thr thresholded connectivity matrix -% -% -% Mika Rubinov, UNSW, 2009-2010 - -W(1:size(W,1)+1:end)=0; %clear diagonal -W(Wj edge pairs (these do not generate triangles). The number of -% false pairs is the main diagonal of A^2. 
Thus the maximum possible -% number of triangles = (2 edges)*([ALL PAIRS] - [FALSE PAIRS]) -% = 2 * (K(K-1)/2 - diag(A^2)) -% = K(K-1) - 2(diag(A^2)) - -S = A+A.'; % symmetrized input graph -K = sum(S,2); % total degree (in + out) -cyc3 = diag(S^3)/2; % number of 3-cycles (ie. directed triangles) -CYC3 = K.*(K-1)-2*diag(A^2); % number of all possible 3-cycles -T = sum(cyc3)./sum(CYC3); % transitivity diff --git a/DefaultData/2019_03_03_BCT/transitivity_bu.m b/DefaultData/2019_03_03_BCT/transitivity_bu.m deleted file mode 100755 index 683b817..0000000 --- a/DefaultData/2019_03_03_BCT/transitivity_bu.m +++ /dev/null @@ -1,20 +0,0 @@ -function [C_tri]=transitivity_bu(A) -%TRANSITIVITY_BU Transitivity -% -% T = transitivity_bu(A); -% -% Transitivity is the ratio of 'triangles to triplets' in the network. -% (A classical version of the clustering coefficient). -% -% Input: A binary undirected connection matrix -% -% Output: T transitivity scalar -% -% Reference: e.g. Humphries et al. (2008) Plos ONE 3: e0002051 -% -% -% Alexandros Goulas, Maastricht University, 2010 - - C_tri = trace(A^3) / (sum(sum(A^2)) - trace(A^2)); - -return; \ No newline at end of file diff --git a/DefaultData/2019_03_03_BCT/transitivity_wd.m b/DefaultData/2019_03_03_BCT/transitivity_wd.m deleted file mode 100755 index 6809956..0000000 --- a/DefaultData/2019_03_03_BCT/transitivity_wd.m +++ /dev/null @@ -1,47 +0,0 @@ -function T=transitivity_wd(W) -%TRANSITIVITY_WD Transitivity -% -% T = transitivity_wd(W); -% -% Transitivity is the ratio of 'triangles to triplets' in the network. -% (A classical version of the clustering coefficient). -% -% Input: W weighted directed connection matrix -% -% Output: T transitivity scalar -% -% Note: All weights must be between 0 and 1. -% This may be achieved using the weight_conversion.m function, -% W_nrm = weight_conversion(W, 'normalize'); -% -% Reference: Rubinov M, Sporns O (2010) NeuroImage 52:1059-69 -% based on Fagiolo (2007) Phys Rev E 76:026107. -% -% -% Contributors: -% Mika Rubinov, UNSW/University of Cambridge -% Christoph Schmidt, Friedrich Schiller University Jena -% Andrew Zalesky, University of Melbourne -% 2007-2015 - -% Modification history: -% 2007: original (MR) -% 2013, 2015: removed tests for absence of nodewise 3-cycles (CS,AZ) -% 2015: Expanded documentation - - -% Methodological note (also see note for clustering_coef_bd) -% The weighted modification is as follows: -% - The numerator: adjacency matrix is replaced with weights matrix ^ 1/3 -% - The denominator: no changes from the binary version -% -% The above reduces to symmetric and/or binary versions of the clustering -% coefficient for respective graphs. - -A = W~=0; % adjacency matrix -S = W.^(1/3)+(W.').^(1/3); % symmetrized weights matrix ^1/3 -K = sum(A+A.',2); % total degree (in + out) -cyc3 = diag(S^3)/2; % number of 3-cycles (ie. directed triangles) -CYC3 = K.*(K-1)-2*diag(A^2); % number of all possible 3-cycles -T = sum(cyc3)./sum(CYC3); % transitivity - diff --git a/DefaultData/2019_03_03_BCT/transitivity_wu.m b/DefaultData/2019_03_03_BCT/transitivity_wu.m deleted file mode 100755 index 232d7be..0000000 --- a/DefaultData/2019_03_03_BCT/transitivity_wu.m +++ /dev/null @@ -1,29 +0,0 @@ -function T=transitivity_wu(W) -%TRANSITIVITY_WU Transitivity -% -% T = transitivity_wu(W); -% -% Transitivity is the ratio of 'triangles to triplets' in the network. -% (A classical version of the clustering coefficient). 
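% Illustrative sketch (not part of the deleted files): the 'triangles to
% triplets' ratio for a small binary undirected network, computed directly with
% the same formula as transitivity_bu above (assumed to be on the path).
A = double(rand(10) > 0.6);            % toy binary network
A = triu(A,1); A = A + A';             % make it undirected with an empty diagonal
T_direct = trace(A^3) / (sum(sum(A^2)) - trace(A^2));   % triangles / triplets
T_bct    = transitivity_bu(A);         % should match T_direct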
-% -% Input: W weighted undirected connection matrix -% -% Output: T transitivity scalar -% -% Note: All weights must be between 0 and 1. -% This may be achieved using the weight_conversion.m function, -% W_nrm = weight_conversion(W, 'normalize'); -% -% Reference: Rubinov M, Sporns O (2010) NeuroImage 52:1059-69 -% based on Onnela et al. (2005) Phys Rev E 71:065103 -% -% -% Mika Rubinov, UNSW/U Cambridge, 2010-2015 - -% Modification history: -% 2010: Original -% 2015: Expanded documentation - -K = sum(W~=0,2); -cyc3 = diag((W.^(1/3))^3); -T = sum(cyc3)./sum((K.*(K-1))); %transitivity diff --git a/DefaultData/2019_03_03_BCT/weight_conversion.m b/DefaultData/2019_03_03_BCT/weight_conversion.m deleted file mode 100755 index f6b93f0..0000000 --- a/DefaultData/2019_03_03_BCT/weight_conversion.m +++ /dev/null @@ -1,88 +0,0 @@ -function W = weight_conversion(W, wcm) -% WEIGHT_CONVERSION Conversion of weights in input matrix -% -% W_bin = weight_conversion(W, 'binarize'); -% W_nrm = weight_conversion(W, 'normalize'); -% L = weight_conversion(W, 'lengths'); -% W_fix = weight_conversion(W, 'autofix'); -% -% This function may either binarize an input weighted connection matrix, -% normalize an input weighted connection matrix, convert an input -% weighted connection matrix to a weighted connection-length matrix, or -% fix common connection problems in binary or weighted connection matrices. -% -% Binarization converts all present connection weights to 1. -% -% Normalization rescales all weight magnitudes to the range [0,1] and -% should be done prior to computing some weighted measures, such as the -% weighted clustering coefficient. -% -% Conversion of connection weights to connection lengths is needed -% prior to computation of weighted distance-based measures, such as -% distance and betweenness centrality. In a weighted connection network, -% higher weights are naturally interpreted as shorter lengths. The -% connection-lengths matrix here is defined as the inverse of the -% connection-weights matrix. -% -% Autofix removes all Inf and NaN values, remove all self connections -% (sets all weights on the main diagonal to 0), ensures that symmetric matrices -% are exactly symmetric (by correcting for round-off error), and ensures that -% binary matrices are exactly binary (by correcting for round-off error). -% -% Inputs: W binary or weighted connectivity matrix -% wcm weight-conversion command - possible values: -% 'binarize' binarize weights -% 'normalize' normalize weights - -% 'lengths' convert weights to lengths -% 'autofix' fixes common weights problems -% -% Output: W_ output connectivity matrix -% -% -% Mika Rubinov, U Cambridge, 2012 - -% Modification History: -% Sep 2012: Original -% Jan 2015: Added autofix feature. 
-% Jan 2017: Corrected bug in autofix (thanks to Jeff Spielberg) - -switch wcm - case 'binarize' - W=double(W~=0); % binarize - case 'normalize' - W=W./max(abs(W(:))); % rescale by maximal weight - case 'lengths' - E=find(W); - W(E)=1./W(E); % invert weights - case 'autofix' - % clear diagonal - n = length(W); - W(1:n+1:end)=0; - - % remove Infs and NaNs - idx = isnan(W) | isinf(W); - if any(any(idx)) - W(idx)=0; - end - - % ensure exact binariness - U = unique(W); - if nnz(U) > 1 - idx_0 = abs(W ) < 1e-10; - idx_1 = abs(W-1) < 1e-10; - if all(all(idx_0 | idx_1)) - W(idx_0)=0; - W(idx_1)=1; - end - end - - % ensure exact symmetry - if ~isequal(W,W.') - if max(max(abs(W-W.'))) < 1e-10 - W=(W+W).'/2; - end - end - otherwise - error('Unknown weight-conversion command.') -end diff --git a/DefaultData/2019_03_03_BCT/writetoPAJ.m b/DefaultData/2019_03_03_BCT/writetoPAJ.m deleted file mode 100755 index 3827786..0000000 --- a/DefaultData/2019_03_03_BCT/writetoPAJ.m +++ /dev/null @@ -1,41 +0,0 @@ -function writetoPAJ(CIJ, fname, arcs) -%WRITETOPAJ Write to Pajek -% -% writetoPAJ(CIJ, fname, arcs); -% -% This function writes a Pajek .net file from a MATLAB matrix -% -% Inputs: CIJ, adjacency matrix -% fname, filename minus .net extension -% arcs, 1 for directed network -% 0 for an undirected network -% -% Chris Honey, Indiana University, 2007 - - -N = size(CIJ,1); -fid = fopen(cat(2,fname,'.net'), 'w'); - -%%%VERTICES -fprintf(fid, '*vertices %6i \r', N); -for i = 1:N - fprintf(fid, '%6i "%6i" \r', [i i]); -end - -%%%ARCS/EDGES -if arcs - fprintf(fid, '*arcs \r'); -else - fprintf(fid, '*edges \r'); -end - -for i = 1:N - for j = 1:N - if CIJ(i,j) ~= 0 - fprintf(fid, '%6i %6i %6f \r', [i j CIJ(i,j)]); - end - end -end - -fclose(fid); - diff --git a/DefaultData/Underlay.nii b/DefaultData/Underlay.nii deleted file mode 100755 index 23d0901..0000000 Binary files a/DefaultData/Underlay.nii and /dev/null differ diff --git a/DefaultData/brain.mat b/DefaultData/brain.mat deleted file mode 100755 index 4023a14..0000000 Binary files a/DefaultData/brain.mat and /dev/null differ diff --git a/DefaultData/load_nii.m b/DefaultData/load_nii.m deleted file mode 100755 index 42415a5..0000000 --- a/DefaultData/load_nii.m +++ /dev/null @@ -1,138 +0,0 @@ -% Load NIFTI or ANALYZE dataset. Support both *.nii and *.hdr/*.img -% file extension. If file extension is not provided, *.hdr/*.img will -% be used as default. -% -% A subset of NIFTI transform is included. For non-orthogonal rotation, -% shearing etc., please use 'reslice_nii.m' to reslice the NIFTI file. -% It will not cause negative effect, as long as you remember not to do -% slice time correction after reslicing the NIFTI file. Output variable -% nii will be in RAS orientation, i.e. X axis from Left to Right, -% Y axis from Posterior to Anterior, and Z axis from Inferior to -% Superior. -% -% Usage: nii = load_nii(filename, [img_idx], [dim5_idx], [dim6_idx], ... -% [dim7_idx], [old_RGB], [tolerance], [preferredForm]) -% -% filename - NIFTI or ANALYZE file name. -% -% img_idx (optional) - a numerical array of 4th dimension indices, -% which is the indices of image scan volume. The number of images -% scan volumes can be obtained from get_nii_frame.m, or simply -% hdr.dime.dim(5). Only the specified volumes will be loaded. -% All available image volumes will be loaded, if it is default or -% empty. -% -% dim5_idx (optional) - a numerical array of 5th dimension indices. -% Only the specified range will be loaded. 
All available range -% will be loaded, if it is default or empty. -% -% dim6_idx (optional) - a numerical array of 6th dimension indices. -% Only the specified range will be loaded. All available range -% will be loaded, if it is default or empty. -% -% dim7_idx (optional) - a numerical array of 7th dimension indices. -% Only the specified range will be loaded. All available range -% will be loaded, if it is default or empty. -% -% old_RGB (optional) - a scale number to tell difference of new RGB24 -% from old RGB24. New RGB24 uses RGB triple sequentially for each -% voxel, like [R1 G1 B1 R2 G2 B2 ...]. Analyze 6.0 from AnalyzeDirect -% uses old RGB24, in a way like [R1 R2 ... G1 G2 ... B1 B2 ...] for -% each slices. If the image that you view is garbled, try to set -% old_RGB variable to 1 and try again, because it could be in -% old RGB24. It will be set to 0, if it is default or empty. -% -% tolerance (optional) - distortion allowed in the loaded image for any -% non-orthogonal rotation or shearing of NIfTI affine matrix. If -% you set 'tolerance' to 0, it means that you do not allow any -% distortion. If you set 'tolerance' to 1, it means that you do -% not care any distortion. The image will fail to be loaded if it -% can not be tolerated. The tolerance will be set to 0.1 (10%), if -% it is default or empty. -% -% preferredForm (optional) - selects which transformation from voxels -% to RAS coordinates; values are s,q,S,Q. Lower case s,q indicate -% "prefer sform or qform, but use others if preferred not present". -% Upper case indicate the program is forced to use the specificied -% tranform or fail loading. 'preferredForm' will be 's', if it is -% default or empty. - Jeff Gunter -% -% Returned values: -% -% nii structure: -% -% hdr - struct with NIFTI header fields. -% -% filetype - Analyze format .hdr/.img (0); -% NIFTI .hdr/.img (1); -% NIFTI .nii (2) -% -% fileprefix - NIFTI filename without extension. -% -% machine - machine string variable. -% -% img - 3D (or 4D) matrix of NIFTI data. -% -% original - the original header before any affine transform. -% -% Part of this file is copied and modified from: -% http://www.mathworks.com/matlabcentral/fileexchange/1878-mri-analyze-tools -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% -function nii = load_nii(filename, img_idx, dim5_idx, dim6_idx, dim7_idx, ... - old_RGB, tolerance, preferredForm) - - if ~exist('filename','var') - error('Usage: nii = load_nii(filename, [img_idx], [dim5_idx], [dim6_idx], [dim7_idx], [old_RGB], [tolerance], [preferredForm])'); - end - - if ~exist('img_idx','var') | isempty(img_idx) - img_idx = []; - end - - if ~exist('dim5_idx','var') | isempty(dim5_idx) - dim5_idx = []; - end - - if ~exist('dim6_idx','var') | isempty(dim6_idx) - dim6_idx = []; - end - - if ~exist('dim7_idx','var') | isempty(dim7_idx) - dim7_idx = []; - end - - if ~exist('old_RGB','var') | isempty(old_RGB) - old_RGB = 0; - end - - if ~exist('tolerance','var') | isempty(tolerance) - tolerance = 0.1; % 10 percent - end - - if ~exist('preferredForm','var') | isempty(preferredForm) - preferredForm= 's'; % Jeff - end - - % Read the dataset header - % - [nii.hdr,nii.filetype,nii.fileprefix,nii.machine] = load_nii_hdr(filename); - - % Read the header extension - % -% nii.ext = load_nii_ext(filename); - - % Read the dataset body - % - [nii.img,nii.hdr] = load_nii_img(nii.hdr,nii.filetype,nii.fileprefix, ... 
- nii.machine,img_idx,dim5_idx,dim6_idx,dim7_idx,old_RGB); - - % Perform some of sform/qform transform - % - nii = xform_nii(nii, tolerance, preferredForm); - - return % load_nii - diff --git a/DefaultData/load_nii_hdr.m b/DefaultData/load_nii_hdr.m deleted file mode 100755 index a915a74..0000000 --- a/DefaultData/load_nii_hdr.m +++ /dev/null @@ -1,320 +0,0 @@ -% Load NIFTI dataset header. Support both *.nii and *.hdr/*.img file -% extension. If file extension is not provided, *.hdr/*.img will be -% used as default. -% -% Usage: [hdr, filetype, fileprefix, machine] = load_nii_hdr(filename) -% -% filename - NIFTI file name. -% -% Returned values: -% -% hdr - struct with NIFTI header fields. -% -% filetype - 0 for Analyze format (*.hdr/*.img); -% 1 for NIFTI format in 2 files (*.hdr/*.img); -% 2 for NIFTI format in 1 file (*.nii). -% -% fileprefix - NIFTI file name without extension. -% -% machine - a string, see below for details. The default here is 'ieee-le'. -% -% 'native' or 'n' - local machine format - the default -% 'ieee-le' or 'l' - IEEE floating point with little-endian -% byte ordering -% 'ieee-be' or 'b' - IEEE floating point with big-endian -% byte ordering -% 'vaxd' or 'd' - VAX D floating point and VAX ordering -% 'vaxg' or 'g' - VAX G floating point and VAX ordering -% 'cray' or 'c' - Cray floating point with big-endian -% byte ordering -% 'ieee-le.l64' or 'a' - IEEE floating point with little-endian -% byte ordering and 64 bit long data type -% 'ieee-be.l64' or 's' - IEEE floating point with big-endian byte -% ordering and 64 bit long data type. -% -% Number of scanned images in the file can be obtained by: -% num_scan = hdr.dime.dim(5) -% -% Part of this file is copied and modified from: -% http://www.mathworks.com/matlabcentral/fileexchange/1878-mri-analyze-tools -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% -function [hdr, filetype, fileprefix, machine] = load_nii_hdr(fileprefix) - - if ~exist('fileprefix','var'), - error('Usage: [hdr, filetype, fileprefix, machine] = load_nii_hdr(filename)'); - end - - machine = 'ieee-le'; - new_ext = 0; - - if findstr('.nii',fileprefix) - new_ext = 1; - fileprefix = strrep(fileprefix,'.nii',''); - end - - if findstr('.hdr',fileprefix) - fileprefix = strrep(fileprefix,'.hdr',''); - end - - if findstr('.img',fileprefix) - fileprefix = strrep(fileprefix,'.img',''); - end - - if new_ext - fn = sprintf('%s.nii',fileprefix); - - if ~exist(fn) - msg = sprintf('Cannot find file "%s.nii".', fileprefix); - error(msg); - end - else - fn = sprintf('%s.hdr',fileprefix); - - if ~exist(fn) - msg = sprintf('Cannot find file "%s.hdr".', fileprefix); - error(msg); - end - end - - fid = fopen(fn,'r',machine); - - if fid < 0, - msg = sprintf('Cannot open file %s.',fn); - error(msg); - else - fseek(fid,0,'bof'); - - if fread(fid,1,'int32') == 348 - hdr = read_header(fid); - fclose(fid); - else - fclose(fid); - - % first try reading the opposite endian to 'machine' - % - switch machine, - case 'ieee-le', machine = 'ieee-be'; - case 'ieee-be', machine = 'ieee-le'; - end - - fid = fopen(fn,'r',machine); - - if fid < 0, - msg = sprintf('Cannot open file %s.',fn); - error(msg); - else - fseek(fid,0,'bof'); - - if fread(fid,1,'int32') ~= 348 - - % Now throw an error - % - msg = sprintf('File "%s" is corrupted.',fn); - error(msg); - end - - hdr = read_header(fid); - fclose(fid); - end - end - end - - if strcmp(hdr.hist.magic, 'n+1') - filetype = 2; - elseif strcmp(hdr.hist.magic, 'ni1') - 
filetype = 1; - else - filetype = 0; - end - - return % load_nii_hdr - - -%--------------------------------------------------------------------- -function [ dsr ] = read_header(fid) - - % Original header structures - % struct dsr - % { - % struct header_key hk; /* 0 + 40 */ - % struct image_dimension dime; /* 40 + 108 */ - % struct data_history hist; /* 148 + 200 */ - % }; /* total= 348 bytes*/ - - dsr.hk = header_key(fid); - dsr.dime = image_dimension(fid); - dsr.hist = data_history(fid); - - % For Analyze data format - % - if ~strcmp(dsr.hist.magic, 'n+1') & ~strcmp(dsr.hist.magic, 'ni1') - dsr.hist.qform_code = 0; - dsr.hist.sform_code = 0; - end - - return % read_header - - -%--------------------------------------------------------------------- -function [ hk ] = header_key(fid) - - fseek(fid,0,'bof'); - - % Original header structures - % struct header_key /* header key */ - % { /* off + size */ - % int sizeof_hdr /* 0 + 4 */ - % char data_type[10]; /* 4 + 10 */ - % char db_name[18]; /* 14 + 18 */ - % int extents; /* 32 + 4 */ - % short int session_error; /* 36 + 2 */ - % char regular; /* 38 + 1 */ - % char dim_info; % char hkey_un0; /* 39 + 1 */ - % }; /* total=40 bytes */ - % - % int sizeof_header Should be 348. - % char regular Must be 'r' to indicate that all images and - % volumes are the same size. - - v6 = version; - if str2num(v6(1))<6 - directchar = '*char'; - else - directchar = 'uchar=>char'; - end - - hk.sizeof_hdr = fread(fid, 1,'int32')'; % should be 348! - hk.data_type = deblank(fread(fid,10,directchar)'); - hk.db_name = deblank(fread(fid,18,directchar)'); - hk.extents = fread(fid, 1,'int32')'; - hk.session_error = fread(fid, 1,'int16')'; - hk.regular = fread(fid, 1,directchar)'; - hk.dim_info = fread(fid, 1,'uchar')'; - - return % header_key - - -%--------------------------------------------------------------------- -function [ dime ] = image_dimension(fid) - - % Original header structures - % struct image_dimension - % { /* off + size */ - % short int dim[8]; /* 0 + 16 */ - % /* - % dim[0] Number of dimensions in database; usually 4. - % dim[1] Image X dimension; number of *pixels* in an image row. - % dim[2] Image Y dimension; number of *pixel rows* in slice. - % dim[3] Volume Z dimension; number of *slices* in a volume. 
- % dim[4] Time points; number of volumes in database - % */ - % float intent_p1; % char vox_units[4]; /* 16 + 4 */ - % float intent_p2; % char cal_units[8]; /* 20 + 4 */ - % float intent_p3; % char cal_units[8]; /* 24 + 4 */ - % short int intent_code; % short int unused1; /* 28 + 2 */ - % short int datatype; /* 30 + 2 */ - % short int bitpix; /* 32 + 2 */ - % short int slice_start; % short int dim_un0; /* 34 + 2 */ - % float pixdim[8]; /* 36 + 32 */ - % /* - % pixdim[] specifies the voxel dimensions: - % pixdim[1] - voxel width, mm - % pixdim[2] - voxel height, mm - % pixdim[3] - slice thickness, mm - % pixdim[4] - volume timing, in msec - % ..etc - % */ - % float vox_offset; /* 68 + 4 */ - % float scl_slope; % float roi_scale; /* 72 + 4 */ - % float scl_inter; % float funused1; /* 76 + 4 */ - % short slice_end; % float funused2; /* 80 + 2 */ - % char slice_code; % float funused2; /* 82 + 1 */ - % char xyzt_units; % float funused2; /* 83 + 1 */ - % float cal_max; /* 84 + 4 */ - % float cal_min; /* 88 + 4 */ - % float slice_duration; % int compressed; /* 92 + 4 */ - % float toffset; % int verified; /* 96 + 4 */ - % int glmax; /* 100 + 4 */ - % int glmin; /* 104 + 4 */ - % }; /* total=108 bytes */ - - dime.dim = fread(fid,8,'int16')'; - dime.intent_p1 = fread(fid,1,'float32')'; - dime.intent_p2 = fread(fid,1,'float32')'; - dime.intent_p3 = fread(fid,1,'float32')'; - dime.intent_code = fread(fid,1,'int16')'; - dime.datatype = fread(fid,1,'int16')'; - dime.bitpix = fread(fid,1,'int16')'; - dime.slice_start = fread(fid,1,'int16')'; - dime.pixdim = fread(fid,8,'float32')'; - dime.vox_offset = fread(fid,1,'float32')'; - dime.scl_slope = fread(fid,1,'float32')'; - dime.scl_inter = fread(fid,1,'float32')'; - dime.slice_end = fread(fid,1,'int16')'; - dime.slice_code = fread(fid,1,'uchar')'; - dime.xyzt_units = fread(fid,1,'uchar')'; - dime.cal_max = fread(fid,1,'float32')'; - dime.cal_min = fread(fid,1,'float32')'; - dime.slice_duration = fread(fid,1,'float32')'; - dime.toffset = fread(fid,1,'float32')'; - dime.glmax = fread(fid,1,'int32')'; - dime.glmin = fread(fid,1,'int32')'; - - return % image_dimension - - -%--------------------------------------------------------------------- -function [ hist ] = data_history(fid) - - % Original header structures - % struct data_history - % { /* off + size */ - % char descrip[80]; /* 0 + 80 */ - % char aux_file[24]; /* 80 + 24 */ - % short int qform_code; /* 104 + 2 */ - % short int sform_code; /* 106 + 2 */ - % float quatern_b; /* 108 + 4 */ - % float quatern_c; /* 112 + 4 */ - % float quatern_d; /* 116 + 4 */ - % float qoffset_x; /* 120 + 4 */ - % float qoffset_y; /* 124 + 4 */ - % float qoffset_z; /* 128 + 4 */ - % float srow_x[4]; /* 132 + 16 */ - % float srow_y[4]; /* 148 + 16 */ - % float srow_z[4]; /* 164 + 16 */ - % char intent_name[16]; /* 180 + 16 */ - % char magic[4]; % int smin; /* 196 + 4 */ - % }; /* total=200 bytes */ - - v6 = version; - if str2num(v6(1))<6 - directchar = '*char'; - else - directchar = 'uchar=>char'; - end - - hist.descrip = deblank(fread(fid,80,directchar)'); - hist.aux_file = deblank(fread(fid,24,directchar)'); - hist.qform_code = fread(fid,1,'int16')'; - hist.sform_code = fread(fid,1,'int16')'; - hist.quatern_b = fread(fid,1,'float32')'; - hist.quatern_c = fread(fid,1,'float32')'; - hist.quatern_d = fread(fid,1,'float32')'; - hist.qoffset_x = fread(fid,1,'float32')'; - hist.qoffset_y = fread(fid,1,'float32')'; - hist.qoffset_z = fread(fid,1,'float32')'; - hist.srow_x = fread(fid,4,'float32')'; - hist.srow_y = 
fread(fid,4,'float32')'; - hist.srow_z = fread(fid,4,'float32')'; - hist.intent_name = deblank(fread(fid,16,directchar)'); - hist.magic = deblank(fread(fid,4,directchar)'); - - fseek(fid,253,'bof'); - hist.originator = fread(fid, 5,'int16')'; - - return % data_history - diff --git a/DefaultData/load_nii_img.m b/DefaultData/load_nii_img.m deleted file mode 100755 index e8d6e5f..0000000 --- a/DefaultData/load_nii_img.m +++ /dev/null @@ -1,386 +0,0 @@ -% internal function - -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) - -function [img,hdr] = load_nii_img(hdr,filetype,fileprefix,machine,img_idx,dim5_idx,dim6_idx,dim7_idx,old_RGB) - - if ~exist('hdr','var') | ~exist('filetype','var') | ~exist('fileprefix','var') | ~exist('machine','var') - error('Usage: [img,hdr] = load_nii_img(hdr,filetype,fileprefix,machine,[img_idx],[dim5_idx],[dim6_idx],[dim7_idx],[old_RGB]);'); - end - - if ~exist('img_idx','var') | isempty(img_idx) | hdr.dime.dim(5)<1 - img_idx = []; - end - - if ~exist('dim5_idx','var') | isempty(dim5_idx) | hdr.dime.dim(6)<1 - dim5_idx = []; - end - - if ~exist('dim6_idx','var') | isempty(dim6_idx) | hdr.dime.dim(7)<1 - dim6_idx = []; - end - - if ~exist('dim7_idx','var') | isempty(dim7_idx) | hdr.dime.dim(8)<1 - dim7_idx = []; - end - - if ~exist('old_RGB','var') | isempty(old_RGB) - old_RGB = 0; - end - - % check img_idx - % - if ~isempty(img_idx) & ~isnumeric(img_idx) - error('"img_idx" should be a numerical array.'); - end - - if length(unique(img_idx)) ~= length(img_idx) - error('Duplicate image index in "img_idx"'); - end - - if ~isempty(img_idx) & (min(img_idx) < 1 | max(img_idx) > hdr.dime.dim(5)) - max_range = hdr.dime.dim(5); - - if max_range == 1 - error(['"img_idx" should be 1.']); - else - range = ['1 ' num2str(max_range)]; - error(['"img_idx" should be an integer within the range of [' range '].']); - end - end - - % check dim5_idx - % - if ~isempty(dim5_idx) & ~isnumeric(dim5_idx) - error('"dim5_idx" should be a numerical array.'); - end - - if length(unique(dim5_idx)) ~= length(dim5_idx) - error('Duplicate index in "dim5_idx"'); - end - - if ~isempty(dim5_idx) & (min(dim5_idx) < 1 | max(dim5_idx) > hdr.dime.dim(6)) - max_range = hdr.dime.dim(6); - - if max_range == 1 - error(['"dim5_idx" should be 1.']); - else - range = ['1 ' num2str(max_range)]; - error(['"dim5_idx" should be an integer within the range of [' range '].']); - end - end - - % check dim6_idx - % - if ~isempty(dim6_idx) & ~isnumeric(dim6_idx) - error('"dim6_idx" should be a numerical array.'); - end - - if length(unique(dim6_idx)) ~= length(dim6_idx) - error('Duplicate index in "dim6_idx"'); - end - - if ~isempty(dim6_idx) & (min(dim6_idx) < 1 | max(dim6_idx) > hdr.dime.dim(7)) - max_range = hdr.dime.dim(7); - - if max_range == 1 - error(['"dim6_idx" should be 1.']); - else - range = ['1 ' num2str(max_range)]; - error(['"dim6_idx" should be an integer within the range of [' range '].']); - end - end - - % check dim7_idx - % - if ~isempty(dim7_idx) & ~isnumeric(dim7_idx) - error('"dim7_idx" should be a numerical array.'); - end - - if length(unique(dim7_idx)) ~= length(dim7_idx) - error('Duplicate index in "dim7_idx"'); - end - - if ~isempty(dim7_idx) & (min(dim7_idx) < 1 | max(dim7_idx) > hdr.dime.dim(8)) - max_range = hdr.dime.dim(8); - - if max_range == 1 - error(['"dim7_idx" should be 1.']); - else - range = ['1 ' num2str(max_range)]; - error(['"dim7_idx" should be an integer within the range of [' range '].']); - end - end - - [img,hdr] = 
read_image(hdr,filetype,fileprefix,machine,img_idx,dim5_idx,dim6_idx,dim7_idx,old_RGB); - - return % load_nii_img - - -%--------------------------------------------------------------------- -function [img,hdr] = read_image(hdr,filetype,fileprefix,machine,img_idx,dim5_idx,dim6_idx,dim7_idx,old_RGB) - - switch filetype - case {0, 1} - fn = [fileprefix '.img']; - case 2 - fn = [fileprefix '.nii']; - end - - fid = fopen(fn,'r',machine); - - if fid < 0, - msg = sprintf('Cannot open file %s.',fn); - error(msg); - end - - % Set bitpix according to datatype - % - % /*Acceptable values for datatype are*/ - % - % 0 None (Unknown bit per voxel) % DT_NONE, DT_UNKNOWN - % 1 Binary (ubit1, bitpix=1) % DT_BINARY - % 2 Unsigned char (uchar or uint8, bitpix=8) % DT_UINT8, NIFTI_TYPE_UINT8 - % 4 Signed short (int16, bitpix=16) % DT_INT16, NIFTI_TYPE_INT16 - % 8 Signed integer (int32, bitpix=32) % DT_INT32, NIFTI_TYPE_INT32 - % 16 Floating point (single or float32, bitpix=32) % DT_FLOAT32, NIFTI_TYPE_FLOAT32 - % 32 Complex, 2 float32 (Use float32, bitpix=64) % DT_COMPLEX64, NIFTI_TYPE_COMPLEX64 - % 64 Double precision (double or float64, bitpix=64) % DT_FLOAT64, NIFTI_TYPE_FLOAT64 - % 128 uint8 RGB (Use uint8, bitpix=24) % DT_RGB24, NIFTI_TYPE_RGB24 - % 256 Signed char (schar or int8, bitpix=8) % DT_INT8, NIFTI_TYPE_INT8 - % 511 Single RGB (Use float32, bitpix=96) % DT_RGB96, NIFTI_TYPE_RGB96 - % 512 Unsigned short (uint16, bitpix=16) % DT_UNINT16, NIFTI_TYPE_UNINT16 - % 768 Unsigned integer (uint32, bitpix=32) % DT_UNINT32, NIFTI_TYPE_UNINT32 - % 1024 Signed long long (int64, bitpix=64) % DT_INT64, NIFTI_TYPE_INT64 - % 1280 Unsigned long long (uint64, bitpix=64) % DT_UINT64, NIFTI_TYPE_UINT64 - % 1536 Long double, float128 (Unsupported, bitpix=128) % DT_FLOAT128, NIFTI_TYPE_FLOAT128 - % 1792 Complex128, 2 float64 (Use float64, bitpix=128) % DT_COMPLEX128, NIFTI_TYPE_COMPLEX128 - % 2048 Complex256, 2 float128 (Unsupported, bitpix=256) % DT_COMPLEX128, NIFTI_TYPE_COMPLEX128 - % - switch hdr.dime.datatype - case 1, - hdr.dime.bitpix = 1; precision = 'ubit1'; - case 2, - hdr.dime.bitpix = 8; precision = 'uint8'; - case 4, - hdr.dime.bitpix = 16; precision = 'int16'; - case 8, - hdr.dime.bitpix = 32; precision = 'int32'; - case 16, - hdr.dime.bitpix = 32; precision = 'float32'; - case 32, - hdr.dime.bitpix = 64; precision = 'float32'; - case 64, - hdr.dime.bitpix = 64; precision = 'float64'; - case 128, - hdr.dime.bitpix = 24; precision = 'uint8'; - case 256 - hdr.dime.bitpix = 8; precision = 'int8'; - case 511 - hdr.dime.bitpix = 96; precision = 'float32'; - case 512 - hdr.dime.bitpix = 16; precision = 'uint16'; - case 768 - hdr.dime.bitpix = 32; precision = 'uint32'; - case 1024 - hdr.dime.bitpix = 64; precision = 'int64'; - case 1280 - hdr.dime.bitpix = 64; precision = 'uint64'; - case 1792, - hdr.dime.bitpix = 128; precision = 'float64'; - otherwise - error('This datatype is not supported'); - end - - hdr.dime.dim(find(hdr.dime.dim < 1)) = 1; - - % move pointer to the start of image block - % - switch filetype - case {0, 1} - fseek(fid, 0, 'bof'); - case 2 - fseek(fid, hdr.dime.vox_offset, 'bof'); - end - - % Load whole image block for old Analyze format or binary image; - % otherwise, load images that are specified in img_idx, dim5_idx, - % dim6_idx, and dim7_idx - % - % For binary image, we have to read all because pos can not be - % seeked in bit and can not be calculated the way below. - % - if hdr.dime.datatype == 1 | isequal(hdr.dime.dim(5:8),ones(1,4)) | ... 
- (isempty(img_idx) & isempty(dim5_idx) & isempty(dim6_idx) & isempty(dim7_idx)) - - % For each frame, precision of value will be read - % in img_siz times, where img_siz is only the - % dimension size of an image, not the byte storage - % size of an image. - % - img_siz = prod(hdr.dime.dim(2:8)); - - % For complex float32 or complex float64, voxel values - % include [real, imag] - % - if hdr.dime.datatype == 32 | hdr.dime.datatype == 1792 - img_siz = img_siz * 2; - end - - %MPH: For RGB24, voxel values include 3 separate color planes - % - if hdr.dime.datatype == 128 | hdr.dime.datatype == 511 - img_siz = img_siz * 3; - end - - img = fread(fid, img_siz, sprintf('*%s',precision)); - - d1 = hdr.dime.dim(2); - d2 = hdr.dime.dim(3); - d3 = hdr.dime.dim(4); - d4 = hdr.dime.dim(5); - d5 = hdr.dime.dim(6); - d6 = hdr.dime.dim(7); - d7 = hdr.dime.dim(8); - - if isempty(img_idx) - img_idx = 1:d4; - end - - if isempty(dim5_idx) - dim5_idx = 1:d5; - end - - if isempty(dim6_idx) - dim6_idx = 1:d6; - end - - if isempty(dim7_idx) - dim7_idx = 1:d7; - end - else - - img = []; - - d1 = hdr.dime.dim(2); - d2 = hdr.dime.dim(3); - d3 = hdr.dime.dim(4); - d4 = hdr.dime.dim(5); - d5 = hdr.dime.dim(6); - d6 = hdr.dime.dim(7); - d7 = hdr.dime.dim(8); - - if isempty(img_idx) - img_idx = 1:d4; - end - - if isempty(dim5_idx) - dim5_idx = 1:d5; - end - - if isempty(dim6_idx) - dim6_idx = 1:d6; - end - - if isempty(dim7_idx) - dim7_idx = 1:d7; - end - - for i7=1:length(dim7_idx) - for i6=1:length(dim6_idx) - for i5=1:length(dim5_idx) - for t=1:length(img_idx) - - % Position is seeked in bytes. To convert dimension size - % to byte storage size, hdr.dime.bitpix/8 will be - % applied. - % - pos = sub2ind([d1 d2 d3 d4 d5 d6 d7], 1, 1, 1, ... - img_idx(t), dim5_idx(i5),dim6_idx(i6),dim7_idx(i7)) -1; - pos = pos * hdr.dime.bitpix/8; - - img_siz = prod(hdr.dime.dim(2:4)); - - % For complex float32 or complex float64, voxel values - % include [real, imag] - % - if hdr.dime.datatype == 32 | hdr.dime.datatype == 1792 - img_siz = img_siz * 2; - end - - %MPH: For RGB24, voxel values include 3 separate color planes - % - if hdr.dime.datatype == 128 | hdr.dime.datatype == 511 - img_siz = img_siz * 3; - end - - if filetype == 2 - fseek(fid, pos + hdr.dime.vox_offset, 'bof'); - else - fseek(fid, pos, 'bof'); - end - - % For each frame, fread will read precision of value - % in img_siz times - % - img = [img fread(fid, img_siz, sprintf('*%s',precision))]; - end - end - end - end - end - - % For complex float32 or complex float64, voxel values - % include [real, imag] - % - if hdr.dime.datatype == 32 | hdr.dime.datatype == 1792 - img = reshape(img, [2, length(img)/2]); - img = complex(img(1,:)', img(2,:)'); - end - - fclose(fid); - - % Update the global min and max values - % - hdr.dime.glmax = double(max(img(:))); - hdr.dime.glmin = double(min(img(:))); - - % old_RGB treat RGB slice by slice, now it is treated voxel by voxel - % - if old_RGB & hdr.dime.datatype == 128 & hdr.dime.bitpix == 24 - % remove squeeze - img = (reshape(img, [hdr.dime.dim(2:3) 3 hdr.dime.dim(4) length(img_idx) length(dim5_idx) length(dim6_idx) length(dim7_idx)])); - img = permute(img, [1 2 4 3 5 6 7 8]); - elseif hdr.dime.datatype == 128 & hdr.dime.bitpix == 24 - % remove squeeze - img = (reshape(img, [3 hdr.dime.dim(2:4) length(img_idx) length(dim5_idx) length(dim6_idx) length(dim7_idx)])); - img = permute(img, [2 3 4 1 5 6 7 8]); - elseif hdr.dime.datatype == 511 & hdr.dime.bitpix == 96 - img = double(img(:)); - img = (img - min(img))/(max(img) - 
min(img)); - % remove squeeze - img = (reshape(img, [3 hdr.dime.dim(2:4) length(img_idx) length(dim5_idx) length(dim6_idx) length(dim7_idx)])); - img = permute(img, [2 3 4 1 5 6 7 8]); - else - % remove squeeze - img = (reshape(img, [hdr.dime.dim(2:4) length(img_idx) length(dim5_idx) length(dim6_idx) length(dim7_idx)])); - end - - if ~isempty(img_idx) - hdr.dime.dim(5) = length(img_idx); - end - - if ~isempty(dim5_idx) - hdr.dime.dim(6) = length(dim5_idx); - end - - if ~isempty(dim6_idx) - hdr.dime.dim(7) = length(dim6_idx); - end - - if ~isempty(dim7_idx) - hdr.dime.dim(8) = length(dim7_idx); - end - - return % read_image - diff --git a/DefaultData/make_nii.m b/DefaultData/make_nii.m deleted file mode 100755 index b89fe66..0000000 --- a/DefaultData/make_nii.m +++ /dev/null @@ -1,243 +0,0 @@ -% Make NIfTI structure specified by an N-D matrix. Usually, N is 3 for -% 3D matrix [x y z], or 4 for 4D matrix with time series [x y z t]. -% However, NIfTI allows a maximum of 7D matrix. For RGB24 datatype, an -% extra dimension for RGB should be inserted immediately after [x y z]. -% Optional parameters can also be included, such as: voxel_size, -% origin, datatype, and description. -% -% Once the NIfTI structure is made, it can be saved into NIfTI file -% using "save_nii" command (for more detail, type: help save_nii). -% -% Usage: nii = make_nii(img, [voxel_size], [origin], [datatype], ... -% [description]) -% -% Where: -% -% img: Usually, img is a 3D matrix [x y z], or a 4D -% matrix with time series [x y z t]. However, -% NIfTI allows a maximum of 7D matrix. For RGB -% datatype, an extra dimension for RGB should -% be inserted immediately after [x y z]. -% -% voxel_size (optional): Voxel size in millimeter for each -% dimension. Default is [1 1 1]. -% -% origin (optional): The AC origin. Default is [0 0 0]. -% -% datatype (optional): Storage data type: -% 2 - uint8, 4 - int16, 8 - int32, 16 - float32, -% 32 - complex64, 64 - float64, 128 - RGB24, -% 256 - int8, 512 - uint16, 768 - uint32, -% 1792 - complex128 -% Default will use the data type of 'img' matrix -% -% description (optional): Description of data. Default is ''. 
-% -% e.g.: -% origin = [33 44 13]; datatype = 64; -% nii = make_nii(img, [], origin, datatype); % default voxel_size -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% -function nii = make_nii(varargin) - - nii.img = varargin{1}; - dims = size(nii.img); - dims = [length(dims) dims ones(1,8)]; - dims = dims(1:8); - - voxel_size = [0 ones(1,7)]; - origin = zeros(1,5); - descrip = ''; - - switch class(nii.img) - case 'uint8' - datatype = 2; - case 'int16' - datatype = 4; - case 'int32' - datatype = 8; - case 'single' - datatype = 16; - case 'double' - datatype = 64; - case 'int8' - datatype = 256; - case 'uint16' - datatype = 512; - case 'uint32' - datatype = 768; - otherwise - error('Datatype is not supported by make_nii.'); - end - - if nargin > 1 & ~isempty(varargin{2}) - voxel_size(2:4) = double(varargin{2}); - end - - if nargin > 2 & ~isempty(varargin{3}) - origin(1:3) = double(varargin{3}); - end - - if nargin > 3 & ~isempty(varargin{4}) - datatype = double(varargin{4}); - end - - if nargin > 4 & ~isempty(varargin{5}) - descrip = varargin{5}; - end - - if datatype == 128 - if ndims(nii.img) > 8 - error('NIfTI only allows a maximum of 7 Dimension matrix.'); - end - - dims(1) = dims(1)-1; - dims(5:8) = [dims(6:8) 1]; - - else - if ndims(nii.img) > 7 - error('NIfTI only allows a maximum of 7 Dimension matrix.'); - end - end - - maxval = round(double(max(nii.img(:)))); - minval = round(double(min(nii.img(:)))); - - nii.hdr = make_header(dims, voxel_size, origin, datatype, ... - descrip, maxval, minval); - - switch nii.hdr.dime.datatype - case 2 - nii.img = uint8(nii.img); - case 4 - nii.img = int16(nii.img); - case 8 - nii.img = int32(nii.img); - case 16 - nii.img = single(nii.img); - case 32 - nii.img = single(nii.img); - case 64 - nii.img = double(nii.img); - case 128 - nii.img = uint8(nii.img); - case 256 - nii.img = int8(nii.img); - case 512 - nii.img = uint16(nii.img); - case 768 - nii.img = uint32(nii.img); - case 1792 - nii.img = double(nii.img); - otherwise - error('Datatype is not supported by make_nii.'); - end - - return; % make_nii - - -%--------------------------------------------------------------------- -function hdr = make_header(dims, voxel_size, origin, datatype, ... - descrip, maxval, minval) - - hdr.hk = header_key; - hdr.dime = image_dimension(dims, voxel_size, datatype, maxval, minval); - hdr.hist = data_history(origin, descrip); - - return; % make_header - - -%--------------------------------------------------------------------- -function hk = header_key - - hk.sizeof_hdr = 348; % must be 348! 
- hk.data_type = ''; - hk.db_name = ''; - hk.extents = 0; - hk.session_error = 0; - hk.regular = 'r'; - hk.dim_info = 0; - - return; % header_key - - -%--------------------------------------------------------------------- -function dime = image_dimension(dims, voxel_size, datatype, maxval, minval) - - dime.dim = dims; - dime.intent_p1 = 0; - dime.intent_p2 = 0; - dime.intent_p3 = 0; - dime.intent_code = 0; - dime.datatype = datatype; - - switch dime.datatype - case 2, - dime.bitpix = 8; precision = 'uint8'; - case 4, - dime.bitpix = 16; precision = 'int16'; - case 8, - dime.bitpix = 32; precision = 'int32'; - case 16, - dime.bitpix = 32; precision = 'float32'; - case 32, - dime.bitpix = 64; precision = 'float32'; - case 64, - dime.bitpix = 64; precision = 'float64'; - case 128, - dime.bitpix = 24; precision = 'uint8'; - case 256 - dime.bitpix = 8; precision = 'int8'; - case 512 - dime.bitpix = 16; precision = 'uint16'; - case 768 - dime.bitpix = 32; precision = 'uint32'; - case 1792, - dime.bitpix = 128; precision = 'float64'; - otherwise - error('Datatype is not supported by make_nii.'); - end - - dime.slice_start = 0; - dime.pixdim = voxel_size; - dime.vox_offset = 0; - dime.scl_slope = 0; - dime.scl_inter = 0; - dime.slice_end = 0; - dime.slice_code = 0; - dime.xyzt_units = 0; - dime.cal_max = 0; - dime.cal_min = 0; - dime.slice_duration = 0; - dime.toffset = 0; - dime.glmax = maxval; - dime.glmin = minval; - - return; % image_dimension - - -%--------------------------------------------------------------------- -function hist = data_history(origin, descrip) - - hist.descrip = descrip; - hist.aux_file = 'none'; - hist.qform_code = 0; - hist.sform_code = 0; - hist.quatern_b = 0; - hist.quatern_c = 0; - hist.quatern_d = 0; - hist.qoffset_x = 0; - hist.qoffset_y = 0; - hist.qoffset_z = 0; - hist.srow_x = zeros(1,4); - hist.srow_y = zeros(1,4); - hist.srow_z = zeros(1,4); - hist.intent_name = ''; - hist.magic = ''; - hist.originator = origin; - - return; % data_history - diff --git a/DefaultData/save_nii.m b/DefaultData/save_nii.m deleted file mode 100755 index c96ee57..0000000 --- a/DefaultData/save_nii.m +++ /dev/null @@ -1,233 +0,0 @@ -% Save NIFTI dataset. Support both *.nii and *.hdr/*.img file extension. -% If file extension is not provided, *.hdr/*.img will be used as default. -% -% Usage: save_nii(nii, filename, [old_RGB]) -% -% nii.hdr - struct with NIFTI header fields (from load_nii.m or make_nii.m) -% -% nii.img - 3D (or 4D) matrix of NIFTI data. -% -% filename - NIFTI file name. -% -% old_RGB - an optional boolean variable to handle special RGB data -% sequence [R1 R2 ... G1 G2 ... B1 B2 ...] that is used only by -% AnalyzeDirect (Analyze Software). Since both NIfTI and Analyze -% file format use RGB triple [R1 G1 B1 R2 G2 B2 ...] sequentially -% for each voxel, this variable is set to FALSE by default. If you -% would like the saved image only to be opened by AnalyzeDirect -% Software, set old_RGB to TRUE (or 1). It will be set to 0, if it -% is default or empty. 
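% Illustrative sketch (not part of the deleted files): a minimal round trip with
% the NIfTI helpers in this folder. The file name, voxel size and data type are
% arbitrary choices for the example.
img = zeros(4,4,4); img(2,2,2) = 1;    % toy 3D volume
nii = make_nii(img, [2 2 2]);          % 2 mm isotropic voxels, datatype inferred from the double img
nii.hdr.dime.datatype = 64;            % float64, see the datatype table in the Tip below
nii.hdr.dime.bitpix   = 64;
save_nii(nii, 'toy_volume.nii');       % writes a single-file NIfTI
check = load_nii('toy_volume.nii');    % check.img should reproduce img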
-% -% Tip: to change the data type, set nii.hdr.dime.datatype, -% and nii.hdr.dime.bitpix to: -% -% 0 None (Unknown bit per voxel) % DT_NONE, DT_UNKNOWN -% 1 Binary (ubit1, bitpix=1) % DT_BINARY -% 2 Unsigned char (uchar or uint8, bitpix=8) % DT_UINT8, NIFTI_TYPE_UINT8 -% 4 Signed short (int16, bitpix=16) % DT_INT16, NIFTI_TYPE_INT16 -% 8 Signed integer (int32, bitpix=32) % DT_INT32, NIFTI_TYPE_INT32 -% 16 Floating point (single or float32, bitpix=32) % DT_FLOAT32, NIFTI_TYPE_FLOAT32 -% 32 Complex, 2 float32 (Use float32, bitpix=64) % DT_COMPLEX64, NIFTI_TYPE_COMPLEX64 -% 64 Double precision (double or float64, bitpix=64) % DT_FLOAT64, NIFTI_TYPE_FLOAT64 -% 128 Red-Green-Blue (Use uint8, bitpix=24) % DT_RGB24, NIFTI_TYPE_RGB24 -% 256 Signed char (schar or int8, bitpix=8) % DT_INT8, NIFTI_TYPE_INT8 -% 512 Unsigned short (uint16, bitpix=16) % DT_UNINT16, NIFTI_TYPE_UNINT16 -% 768 Unsigned integer (uint32, bitpix=32) % DT_UNINT32, NIFTI_TYPE_UNINT32 -% 1024 Signed long long (int64, bitpix=64) % DT_INT64, NIFTI_TYPE_INT64 -% 1280 Unsigned long long (uint64, bitpix=64) % DT_UINT64, NIFTI_TYPE_UINT64 -% 1536 Long double, float128 (Unsupported, bitpix=128) % DT_FLOAT128, NIFTI_TYPE_FLOAT128 -% 1792 Complex128, 2 float64 (Use float64, bitpix=128) % DT_COMPLEX128, NIFTI_TYPE_COMPLEX128 -% 2048 Complex256, 2 float128 (Unsupported, bitpix=256) % DT_COMPLEX128, NIFTI_TYPE_COMPLEX128 -% -% Part of this file is copied and modified from: -% http://www.mathworks.com/matlabcentral/fileexchange/1878-mri-analyze-tools -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% - "old_RGB" related codes in "save_nii.m" are added by Mike Harms (2006.06.28) -% -function save_nii(nii, fileprefix, old_RGB) - - if ~exist('nii','var') | isempty(nii) | ~isfield(nii,'hdr') | ... 
- ~isfield(nii,'img') | ~exist('fileprefix','var') | isempty(fileprefix) - - error('Usage: save_nii(nii, filename, [old_RGB])'); - end - - if isfield(nii,'untouch') & nii.untouch == 1 - error('Usage: please use ''save_untouch_nii.m'' for the untouched structure.'); - end - - if ~exist('old_RGB','var') | isempty(old_RGB) - old_RGB = 0; - end - - filetype = 1; - - % Note: fileprefix is actually the filename you want to save - % - if findstr('.nii',fileprefix) - filetype = 2; - fileprefix = strrep(fileprefix,'.nii',''); - end - - if findstr('.hdr',fileprefix) - fileprefix = strrep(fileprefix,'.hdr',''); - end - - if findstr('.img',fileprefix) - fileprefix = strrep(fileprefix,'.img',''); - end - - write_nii(nii, filetype, fileprefix, old_RGB); - - if filetype == 1 - - % So earlier versions of SPM can also open it with correct originator - % - M=[[diag(nii.hdr.dime.pixdim(2:4)) -[nii.hdr.hist.originator(1:3).*nii.hdr.dime.pixdim(2:4)]'];[0 0 0 1]]; - save([fileprefix '.mat'], 'M'); - end - - return % save_nii - - -%----------------------------------------------------------------------------------- -function write_nii(nii, filetype, fileprefix, old_RGB) - - hdr = nii.hdr; - - if isfield(nii,'ext') & ~isempty(nii.ext) - ext = nii.ext; - [ext, esize_total] = verify_nii_ext(ext); - else - ext = []; - end - - switch double(hdr.dime.datatype), - case 1, - hdr.dime.bitpix = int16(1 ); precision = 'ubit1'; - case 2, - hdr.dime.bitpix = int16(8 ); precision = 'uint8'; - case 4, - hdr.dime.bitpix = int16(16); precision = 'int16'; - case 8, - hdr.dime.bitpix = int16(32); precision = 'int32'; - case 16, - hdr.dime.bitpix = int16(32); precision = 'float32'; - case 32, - hdr.dime.bitpix = int16(64); precision = 'float32'; - case 64, - hdr.dime.bitpix = int16(64); precision = 'float64'; - case 128, - hdr.dime.bitpix = int16(24); precision = 'uint8'; - case 256 - hdr.dime.bitpix = int16(8 ); precision = 'int8'; - case 512 - hdr.dime.bitpix = int16(16); precision = 'uint16'; - case 768 - hdr.dime.bitpix = int16(32); precision = 'uint32'; - case 1024 - hdr.dime.bitpix = int16(64); precision = 'int64'; - case 1280 - hdr.dime.bitpix = int16(64); precision = 'uint64'; - case 1792, - hdr.dime.bitpix = int16(128); precision = 'float64'; - otherwise - error('This datatype is not supported'); - end - - hdr.dime.glmax = round(double(max(nii.img(:)))); - hdr.dime.glmin = round(double(min(nii.img(:)))); - - if filetype == 2 - fid = fopen(sprintf('%s.nii',fileprefix),'w'); - - if fid < 0, - msg = sprintf('Cannot open file %s.nii.',fileprefix); - error(msg); - end - - hdr.dime.vox_offset = 352; - - if ~isempty(ext) - hdr.dime.vox_offset = hdr.dime.vox_offset + esize_total; - end - - hdr.hist.magic = 'n+1'; - save_nii_hdr(hdr, fid); - - if ~isempty(ext) - save_nii_ext(ext, fid); - end - else - fid = fopen(sprintf('%s.hdr',fileprefix),'w'); - - if fid < 0, - msg = sprintf('Cannot open file %s.hdr.',fileprefix); - error(msg); - end - - hdr.dime.vox_offset = 0; - hdr.hist.magic = 'ni1'; - save_nii_hdr(hdr, fid); - - if ~isempty(ext) - save_nii_ext(ext, fid); - end - - fclose(fid); - fid = fopen(sprintf('%s.img',fileprefix),'w'); - end - - ScanDim = double(hdr.dime.dim(5)); % t - SliceDim = double(hdr.dime.dim(4)); % z - RowDim = double(hdr.dime.dim(3)); % y - PixelDim = double(hdr.dime.dim(2)); % x - SliceSz = double(hdr.dime.pixdim(4)); - RowSz = double(hdr.dime.pixdim(3)); - PixelSz = double(hdr.dime.pixdim(2)); - - x = 1:PixelDim; - - if filetype == 2 & isempty(ext) - skip_bytes = double(hdr.dime.vox_offset) - 348; - else - 
skip_bytes = 0; - end - - if double(hdr.dime.datatype) == 128 - - % RGB planes are expected to be in the 4th dimension of nii.img - % - if(size(nii.img,4)~=3) - error(['The NII structure does not appear to have 3 RGB color planes in the 4th dimension']); - end - - if old_RGB - nii.img = permute(nii.img, [1 2 4 3 5 6 7 8]); - else - nii.img = permute(nii.img, [4 1 2 3 5 6 7 8]); - end - end - - % For complex float32 or complex float64, voxel values - % include [real, imag] - % - if hdr.dime.datatype == 32 | hdr.dime.datatype == 1792 - real_img = real(nii.img(:))'; - nii.img = imag(nii.img(:))'; - nii.img = [real_img; nii.img]; - end - - if skip_bytes - fwrite(fid, ones(1,skip_bytes), 'uint8'); - end - - fwrite(fid, nii.img, precision); -% fwrite(fid, nii.img, precision, skip_bytes); % error using skip - fclose(fid); - - return; % write_nii - diff --git a/DefaultData/save_nii_ext.m b/DefaultData/save_nii_ext.m deleted file mode 100755 index 4788649..0000000 --- a/DefaultData/save_nii_ext.m +++ /dev/null @@ -1,38 +0,0 @@ -% Save NIFTI header extension. -% -% Usage: save_nii_ext(ext, fid) -% -% ext - struct with NIFTI header extension fields. -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% -function save_nii_ext(ext, fid) - - if ~exist('ext','var') | ~exist('fid','var') - error('Usage: save_nii_ext(ext, fid)'); - end - - if ~isfield(ext,'extension') | ~isfield(ext,'section') | ~isfield(ext,'num_ext') - error('Wrong header extension'); - end - - write_ext(ext, fid); - - return; % save_nii_ext - - -%--------------------------------------------------------------------- -function write_ext(ext, fid) - - fwrite(fid, ext.extension, 'uchar'); - - for i=1:ext.num_ext - fwrite(fid, ext.section(i).esize, 'int32'); - fwrite(fid, ext.section(i).ecode, 'int32'); - fwrite(fid, ext.section(i).edata, 'uchar'); - end - - return; % write_ext - diff --git a/DefaultData/save_nii_hdr.m b/DefaultData/save_nii_hdr.m deleted file mode 100755 index 6cc34bb..0000000 --- a/DefaultData/save_nii_hdr.m +++ /dev/null @@ -1,239 +0,0 @@ -% Save NIFTI dataset header. Support both *.nii and *.hdr/*.img file -% extension. -% -% Usage: save_nii_hdr(hdr, fid) -% -% hdr - struct with NIFTI header fields. -% -% fileprefix - NIFTI file name without extension. 
-% -% Part of this file is copied and modified from: -% http://www.mathworks.com/matlabcentral/fileexchange/1878-mri-analyze-tools -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% -function save_nii_hdr(hdr, fid) - - if ~exist('hdr','var') | ~exist('fid','var') - error('Usage: save_nii_hdr(hdr, fid)'); - end - - if ~isequal(hdr.hk.sizeof_hdr,348), - error('hdr.hk.sizeof_hdr must be 348.'); - end - - if hdr.hist.qform_code == 0 & hdr.hist.sform_code == 0 - hdr.hist.sform_code = 1; - hdr.hist.srow_x(1) = hdr.dime.pixdim(2); - hdr.hist.srow_x(2) = 0; - hdr.hist.srow_x(3) = 0; - hdr.hist.srow_y(1) = 0; - hdr.hist.srow_y(2) = hdr.dime.pixdim(3); - hdr.hist.srow_y(3) = 0; - hdr.hist.srow_z(1) = 0; - hdr.hist.srow_z(2) = 0; - hdr.hist.srow_z(3) = hdr.dime.pixdim(4); - hdr.hist.srow_x(4) = (1-hdr.hist.originator(1))*hdr.dime.pixdim(2); - hdr.hist.srow_y(4) = (1-hdr.hist.originator(2))*hdr.dime.pixdim(3); - hdr.hist.srow_z(4) = (1-hdr.hist.originator(3))*hdr.dime.pixdim(4); - end - - write_header(hdr, fid); - - return; % save_nii_hdr - - -%--------------------------------------------------------------------- -function write_header(hdr, fid) - - % Original header structures - % struct dsr /* dsr = hdr */ - % { - % struct header_key hk; /* 0 + 40 */ - % struct image_dimension dime; /* 40 + 108 */ - % struct data_history hist; /* 148 + 200 */ - % }; /* total= 348 bytes*/ - - header_key(fid, hdr.hk); - image_dimension(fid, hdr.dime); - data_history(fid, hdr.hist); - - % check the file size is 348 bytes - % - fbytes = ftell(fid); - - if ~isequal(fbytes,348), - msg = sprintf('Header size is not 348 bytes.'); - warning(msg); - end - - return; % write_header - - -%--------------------------------------------------------------------- -function header_key(fid, hk) - - fseek(fid,0,'bof'); - - % Original header structures - % struct header_key /* header key */ - % { /* off + size */ - % int sizeof_hdr /* 0 + 4 */ - % char data_type[10]; /* 4 + 10 */ - % char db_name[18]; /* 14 + 18 */ - % int extents; /* 32 + 4 */ - % short int session_error; /* 36 + 2 */ - % char regular; /* 38 + 1 */ - % char dim_info; % char hkey_un0; /* 39 + 1 */ - % }; /* total=40 bytes */ - - fwrite(fid, hk.sizeof_hdr(1), 'int32'); % must be 348. 
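When neither transform code is set, save_nii_hdr above synthesises a diagonal sform from the voxel sizes and the originator. Written as a single 4 x 4 affine (values illustrative), the construction is:

% Sketch: the default sform built by save_nii_hdr, expressed as one affine.
pix = [3 3 3];         % voxel sizes, hdr.dime.pixdim(2:4)
org = [27 38 18];      % originator in voxel indices, hdr.hist.originator(1:3)
M = [diag(pix), ((1 - org(:)) .* pix(:)); 0 0 0 1];
% Rows 1:3 of M correspond to the srow_x, srow_y and srow_z header fields.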
- - % data_type = sprintf('%-10s',hk.data_type); % ensure it is 10 chars from left - % fwrite(fid, data_type(1:10), 'uchar'); - pad = zeros(1, 10-length(hk.data_type)); - hk.data_type = [hk.data_type char(pad)]; - fwrite(fid, hk.data_type(1:10), 'uchar'); - - % db_name = sprintf('%-18s', hk.db_name); % ensure it is 18 chars from left - % fwrite(fid, db_name(1:18), 'uchar'); - pad = zeros(1, 18-length(hk.db_name)); - hk.db_name = [hk.db_name char(pad)]; - fwrite(fid, hk.db_name(1:18), 'uchar'); - - fwrite(fid, hk.extents(1), 'int32'); - fwrite(fid, hk.session_error(1), 'int16'); - fwrite(fid, hk.regular(1), 'uchar'); % might be uint8 - - % fwrite(fid, hk.hkey_un0(1), 'uchar'); - % fwrite(fid, hk.hkey_un0(1), 'uint8'); - fwrite(fid, hk.dim_info(1), 'uchar'); - - return; % header_key - - -%--------------------------------------------------------------------- -function image_dimension(fid, dime) - - % Original header structures - % struct image_dimension - % { /* off + size */ - % short int dim[8]; /* 0 + 16 */ - % float intent_p1; % char vox_units[4]; /* 16 + 4 */ - % float intent_p2; % char cal_units[8]; /* 20 + 4 */ - % float intent_p3; % char cal_units[8]; /* 24 + 4 */ - % short int intent_code; % short int unused1; /* 28 + 2 */ - % short int datatype; /* 30 + 2 */ - % short int bitpix; /* 32 + 2 */ - % short int slice_start; % short int dim_un0; /* 34 + 2 */ - % float pixdim[8]; /* 36 + 32 */ - % /* - % pixdim[] specifies the voxel dimensions: - % pixdim[1] - voxel width - % pixdim[2] - voxel height - % pixdim[3] - interslice distance - % pixdim[4] - volume timing, in msec - % ..etc - % */ - % float vox_offset; /* 68 + 4 */ - % float scl_slope; % float roi_scale; /* 72 + 4 */ - % float scl_inter; % float funused1; /* 76 + 4 */ - % short slice_end; % float funused2; /* 80 + 2 */ - % char slice_code; % float funused2; /* 82 + 1 */ - % char xyzt_units; % float funused2; /* 83 + 1 */ - % float cal_max; /* 84 + 4 */ - % float cal_min; /* 88 + 4 */ - % float slice_duration; % int compressed; /* 92 + 4 */ - % float toffset; % int verified; /* 96 + 4 */ - % int glmax; /* 100 + 4 */ - % int glmin; /* 104 + 4 */ - % }; /* total=108 bytes */ - - fwrite(fid, dime.dim(1:8), 'int16'); - fwrite(fid, dime.intent_p1(1), 'float32'); - fwrite(fid, dime.intent_p2(1), 'float32'); - fwrite(fid, dime.intent_p3(1), 'float32'); - fwrite(fid, dime.intent_code(1), 'int16'); - fwrite(fid, dime.datatype(1), 'int16'); - fwrite(fid, dime.bitpix(1), 'int16'); - fwrite(fid, dime.slice_start(1), 'int16'); - fwrite(fid, dime.pixdim(1:8), 'float32'); - fwrite(fid, dime.vox_offset(1), 'float32'); - fwrite(fid, dime.scl_slope(1), 'float32'); - fwrite(fid, dime.scl_inter(1), 'float32'); - fwrite(fid, dime.slice_end(1), 'int16'); - fwrite(fid, dime.slice_code(1), 'uchar'); - fwrite(fid, dime.xyzt_units(1), 'uchar'); - fwrite(fid, dime.cal_max(1), 'float32'); - fwrite(fid, dime.cal_min(1), 'float32'); - fwrite(fid, dime.slice_duration(1), 'float32'); - fwrite(fid, dime.toffset(1), 'float32'); - fwrite(fid, dime.glmax(1), 'int32'); - fwrite(fid, dime.glmin(1), 'int32'); - - return; % image_dimension - - -%--------------------------------------------------------------------- -function data_history(fid, hist) - - % Original header structures - %struct data_history - % { /* off + size */ - % char descrip[80]; /* 0 + 80 */ - % char aux_file[24]; /* 80 + 24 */ - % short int qform_code; /* 104 + 2 */ - % short int sform_code; /* 106 + 2 */ - % float quatern_b; /* 108 + 4 */ - % float quatern_c; /* 112 + 4 */ - % float quatern_d; /* 116 
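The fixed-width character fields above (data_type, db_name, and later descrip, aux_file, intent_name, magic) are padded with NUL bytes before being written. The pattern, as an illustrative helper (pad_field is not a toolbox function):

% Pad a header string with NULs to a fixed byte width before fwrite.
function padded = pad_field(s, width)
    pad    = char(zeros(1, max(0, width - length(s))));
    padded = [s, pad];
    padded = padded(1:width);   % also truncates inputs that are too long
end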
+ 4 */ - % float qoffset_x; /* 120 + 4 */ - % float qoffset_y; /* 124 + 4 */ - % float qoffset_z; /* 128 + 4 */ - % float srow_x[4]; /* 132 + 16 */ - % float srow_y[4]; /* 148 + 16 */ - % float srow_z[4]; /* 164 + 16 */ - % char intent_name[16]; /* 180 + 16 */ - % char magic[4]; % int smin; /* 196 + 4 */ - % }; /* total=200 bytes */ - - % descrip = sprintf('%-80s', hist.descrip); % 80 chars from left - % fwrite(fid, descrip(1:80), 'uchar'); - pad = zeros(1, 80-length(hist.descrip)); - hist.descrip = [hist.descrip char(pad)]; - fwrite(fid, hist.descrip(1:80), 'uchar'); - - % aux_file = sprintf('%-24s', hist.aux_file); % 24 chars from left - % fwrite(fid, aux_file(1:24), 'uchar'); - pad = zeros(1, 24-length(hist.aux_file)); - hist.aux_file = [hist.aux_file char(pad)]; - fwrite(fid, hist.aux_file(1:24), 'uchar'); - - fwrite(fid, hist.qform_code, 'int16'); - fwrite(fid, hist.sform_code, 'int16'); - fwrite(fid, hist.quatern_b, 'float32'); - fwrite(fid, hist.quatern_c, 'float32'); - fwrite(fid, hist.quatern_d, 'float32'); - fwrite(fid, hist.qoffset_x, 'float32'); - fwrite(fid, hist.qoffset_y, 'float32'); - fwrite(fid, hist.qoffset_z, 'float32'); - fwrite(fid, hist.srow_x(1:4), 'float32'); - fwrite(fid, hist.srow_y(1:4), 'float32'); - fwrite(fid, hist.srow_z(1:4), 'float32'); - - % intent_name = sprintf('%-16s', hist.intent_name); % 16 chars from left - % fwrite(fid, intent_name(1:16), 'uchar'); - pad = zeros(1, 16-length(hist.intent_name)); - hist.intent_name = [hist.intent_name char(pad)]; - fwrite(fid, hist.intent_name(1:16), 'uchar'); - - % magic = sprintf('%-4s', hist.magic); % 4 chars from left - % fwrite(fid, magic(1:4), 'uchar'); - pad = zeros(1, 4-length(hist.magic)); - hist.magic = [hist.magic char(pad)]; - fwrite(fid, hist.magic(1:4), 'uchar'); - - return; % data_history - diff --git a/DefaultData/xform_nii.m b/DefaultData/xform_nii.m deleted file mode 100755 index 21d82cd..0000000 --- a/DefaultData/xform_nii.m +++ /dev/null @@ -1,521 +0,0 @@ -% internal function - -% 'xform_nii.m' is an internal function called by "load_nii.m", so -% you do not need run this program by yourself. It does simplified -% NIfTI sform/qform affine transform, and supports some of the -% affine transforms, including translation, reflection, and -% orthogonal rotation (N*90 degree). -% -% For other affine transforms, e.g. any degree rotation, shearing -% etc. you will have to use the included 'reslice_nii.m' program -% to reslice the image volume. 'reslice_nii.m' is not called by -% any other program, and you have to run 'reslice_nii.m' explicitly -% for those NIfTI files that you want to reslice them. -% -% Since 'xform_nii.m' does not involve any interpolation or any -% slice change, the original image volume is supposed to be -% untouched, although it is translated, reflected, or even -% orthogonally rotated, based on the affine matrix in the -% NIfTI header. -% -% However, the affine matrix in the header of a lot NIfTI files -% contain slightly non-orthogonal rotation. Therefore, optional -% input parameter 'tolerance' is used to allow some distortion -% in the loaded image for any non-orthogonal rotation or shearing -% of NIfTI affine matrix. If you set 'tolerance' to 0, it means -% that you do not allow any distortion. If you set 'tolerance' to -% 1, it means that you do not care any distortion. The image will -% fail to be loaded if it can not be tolerated. The tolerance will -% be set to 0.1 (10%), if it is default or empty. 
-% -% Because 'reslice_nii.m' has to perform 3D interpolation, it can -% be slow depending on image size and affine matrix in the header. -% -% After you perform the affine transform, the 'nii' structure -% generated from 'xform_nii.m' or new NIfTI file created from -% 'reslice_nii.m' will be in RAS orientation, i.e. X axis from -% Left to Right, Y axis from Posterior to Anterior, and Z axis -% from Inferior to Superior. -% -% NOTE: This function should be called immediately after load_nii. -% -% Usage: [ nii ] = xform_nii(nii, [tolerance], [preferredForm]) -% -% nii - NIFTI structure (returned from load_nii) -% -% tolerance (optional) - distortion allowed for non-orthogonal rotation -% or shearing in NIfTI affine matrix. It will be set to 0.1 (10%), -% if it is default or empty. -% -% preferredForm (optional) - selects which transformation from voxels -% to RAS coordinates; values are s,q,S,Q. Lower case s,q indicate -% "prefer sform or qform, but use others if preferred not present". -% Upper case indicate the program is forced to use the specificied -% tranform or fail loading. 'preferredForm' will be 's', if it is -% default or empty. - Jeff Gunter -% -% NIFTI data format can be found on: http://nifti.nimh.nih.gov -% -% - Jimmy Shen (jimmy@rotman-baycrest.on.ca) -% -function nii = xform_nii(nii, tolerance, preferredForm) - - % save a copy of the header as it was loaded. This is the - % header before any sform, qform manipulation is done. - % - nii.original.hdr = nii.hdr; - - if ~exist('tolerance','var') | isempty(tolerance) - tolerance = 0.1; - elseif(tolerance<=0) - tolerance = eps; - end - - if ~exist('preferredForm','var') | isempty(preferredForm) - preferredForm= 's'; % Jeff - end - - % if scl_slope field is nonzero, then each voxel value in the - % dataset should be scaled as: y = scl_slope * x + scl_inter - % I bring it here because hdr will be modified by change_hdr. - % - if nii.hdr.dime.scl_slope ~= 0 & ... - ismember(nii.hdr.dime.datatype, [2,4,8,16,64,256,512,768]) & ... - (nii.hdr.dime.scl_slope ~= 1 | nii.hdr.dime.scl_inter ~= 0) - - nii.img = ... - nii.hdr.dime.scl_slope * double(nii.img) + nii.hdr.dime.scl_inter; - - if nii.hdr.dime.datatype == 64 - - nii.hdr.dime.datatype = 64; - nii.hdr.dime.bitpix = 64; - else - nii.img = single(nii.img); - - nii.hdr.dime.datatype = 16; - nii.hdr.dime.bitpix = 32; - end - - nii.hdr.dime.glmax = max(double(nii.img(:))); - nii.hdr.dime.glmin = min(double(nii.img(:))); - - % set scale to non-use, because it is applied in xform_nii - % - nii.hdr.dime.scl_slope = 0; - - end - - % However, the scaling is to be ignored if datatype is DT_RGB24. - - % If datatype is a complex type, then the scaling is to be applied - % to both the real and imaginary parts. - % - if nii.hdr.dime.scl_slope ~= 0 & ... - ismember(nii.hdr.dime.datatype, [32,1792]) - - nii.img = ... 
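The scaling branch above applies the documented rule y = scl_slope * x + scl_inter to every voxel, then clears scl_slope so the scaling cannot be applied twice. A self-contained sketch with illustrative header values:

% Sketch: the intensity rescaling performed by xform_nii when scl_slope is set.
scl_slope = 2; scl_inter = 10;               % illustrative header values
raw = int16([100 200 300]);                  % stored voxel values
img = scl_slope * double(raw) + scl_inter;   % rescaled intensities (floating point)
% xform_nii stores the result as single (datatype 16) unless the data were
% already float64, and then sets scl_slope to 0.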
- nii.hdr.dime.scl_slope * double(nii.img) + nii.hdr.dime.scl_inter; - - if nii.hdr.dime.datatype == 32 - nii.img = single(nii.img); - end - - nii.hdr.dime.glmax = max(double(nii.img(:))); - nii.hdr.dime.glmin = min(double(nii.img(:))); - - % set scale to non-use, because it is applied in xform_nii - % - nii.hdr.dime.scl_slope = 0; - - end - - % There is no need for this program to transform Analyze data - % - if nii.filetype == 0 & exist([nii.fileprefix '.mat'],'file') - load([nii.fileprefix '.mat']); % old SPM affine matrix - R=M(1:3,1:3); - T=M(1:3,4); - T=R*ones(3,1)+T; - M(1:3,4)=T; - nii.hdr.hist.qform_code=0; - nii.hdr.hist.sform_code=1; - nii.hdr.hist.srow_x=M(1,:); - nii.hdr.hist.srow_y=M(2,:); - nii.hdr.hist.srow_z=M(3,:); - elseif nii.filetype == 0 - nii.hdr.hist.rot_orient = []; - nii.hdr.hist.flip_orient = []; - return; % no sform/qform for Analyze format - end - - hdr = nii.hdr; - - [hdr,orient]=change_hdr(hdr,tolerance,preferredForm); - - % flip and/or rotate image data - % - if ~isequal(orient, [1 2 3]) - - old_dim = hdr.dime.dim([2:4]); - - % More than 1 time frame - % - if ndims(nii.img) > 3 - pattern = 1:prod(old_dim); - else - pattern = []; - end - - if ~isempty(pattern) - pattern = reshape(pattern, old_dim); - end - - % calculate for rotation after flip - % - rot_orient = mod(orient + 2, 3) + 1; - - % do flip: - % - flip_orient = orient - rot_orient; - - for i = 1:3 - if flip_orient(i) - if ~isempty(pattern) - pattern = flipdim(pattern, i); - else - nii.img = flipdim(nii.img, i); - end - end - end - - % get index of orient (rotate inversely) - % - [tmp rot_orient] = sort(rot_orient); - - new_dim = old_dim; - new_dim = new_dim(rot_orient); - hdr.dime.dim([2:4]) = new_dim; - - new_pixdim = hdr.dime.pixdim([2:4]); - new_pixdim = new_pixdim(rot_orient); - hdr.dime.pixdim([2:4]) = new_pixdim; - - % re-calculate originator - % - tmp = hdr.hist.originator([1:3]); - tmp = tmp(rot_orient); - flip_orient = flip_orient(rot_orient); - - for i = 1:3 - if flip_orient(i) & ~isequal(tmp(i), 0) - tmp(i) = new_dim(i) - tmp(i) + 1; - end - end - - hdr.hist.originator([1:3]) = tmp; - hdr.hist.rot_orient = rot_orient; - hdr.hist.flip_orient = flip_orient; - - % do rotation: - % - if ~isempty(pattern) - pattern = permute(pattern, rot_orient); - pattern = pattern(:); - - if hdr.dime.datatype == 32 | hdr.dime.datatype == 1792 | ... - hdr.dime.datatype == 128 | hdr.dime.datatype == 511 - - tmp = reshape(nii.img(:,:,:,1), [prod(new_dim) hdr.dime.dim(5:8)]); - tmp = tmp(pattern, :); - nii.img(:,:,:,1) = reshape(tmp, [new_dim hdr.dime.dim(5:8)]); - - tmp = reshape(nii.img(:,:,:,2), [prod(new_dim) hdr.dime.dim(5:8)]); - tmp = tmp(pattern, :); - nii.img(:,:,:,2) = reshape(tmp, [new_dim hdr.dime.dim(5:8)]); - - if hdr.dime.datatype == 128 | hdr.dime.datatype == 511 - tmp = reshape(nii.img(:,:,:,3), [prod(new_dim) hdr.dime.dim(5:8)]); - tmp = tmp(pattern, :); - nii.img(:,:,:,3) = reshape(tmp, [new_dim hdr.dime.dim(5:8)]); - end - - else - nii.img = reshape(nii.img, [prod(new_dim) hdr.dime.dim(5:8)]); - nii.img = nii.img(pattern, :); - nii.img = reshape(nii.img, [new_dim hdr.dime.dim(5:8)]); - end - else - if hdr.dime.datatype == 32 | hdr.dime.datatype == 1792 | ... 
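The reorientation above amounts to mirroring some axes and then permuting the dimensions into RAS order; flipdim in the deleted code is the older name for flip. A small sketch on an illustrative 3D volume:

% Sketch: flip then permute a volume according to flip_orient / rot_orient.
V = reshape(1:24, [2 3 4]);    % illustrative volume
flip_orient = [1 0 1];         % axes to mirror (illustrative)
rot_orient  = [2 1 3];         % dimension order after rotation (illustrative)
for i = 1:3
    if flip_orient(i)
        V = flip(V, i);
    end
end
V = permute(V, rot_orient);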
- hdr.dime.datatype == 128 | hdr.dime.datatype == 511 - - nii.img(:,:,:,1) = permute(nii.img(:,:,:,1), rot_orient); - nii.img(:,:,:,2) = permute(nii.img(:,:,:,2), rot_orient); - - if hdr.dime.datatype == 128 | hdr.dime.datatype == 511 - nii.img(:,:,:,3) = permute(nii.img(:,:,:,3), rot_orient); - end - else - nii.img = permute(nii.img, rot_orient); - end - end - else - hdr.hist.rot_orient = []; - hdr.hist.flip_orient = []; - end - - nii.hdr = hdr; - - return; % xform_nii - - -%----------------------------------------------------------------------- -function [hdr, orient] = change_hdr(hdr, tolerance, preferredForm) - - orient = [1 2 3]; - affine_transform = 1; - - % NIFTI can have both sform and qform transform. This program - % will check sform_code prior to qform_code by default. - % - % If user specifys "preferredForm", user can then choose the - % priority. - Jeff - % - useForm=[]; % Jeff - - if isequal(preferredForm,'S') - if isequal(hdr.hist.sform_code,0) - error('User requires sform, sform not set in header'); - else - useForm='s'; - end - end % Jeff - - if isequal(preferredForm,'Q') - if isequal(hdr.hist.qform_code,0) - error('User requires qform, qform not set in header'); - else - useForm='q'; - end - end % Jeff - - if isequal(preferredForm,'s') - if hdr.hist.sform_code > 0 - useForm='s'; - elseif hdr.hist.qform_code > 0 - useForm='q'; - end - end % Jeff - - if isequal(preferredForm,'q') - if hdr.hist.qform_code > 0 - useForm='q'; - elseif hdr.hist.sform_code > 0 - useForm='s'; - end - end % Jeff - - if isequal(useForm,'s') - R = [hdr.hist.srow_x(1:3) - hdr.hist.srow_y(1:3) - hdr.hist.srow_z(1:3)]; - - T = [hdr.hist.srow_x(4) - hdr.hist.srow_y(4) - hdr.hist.srow_z(4)]; - - if det(R) == 0 | ~isequal(R(find(R)), sum(R)') - hdr.hist.old_affine = [ [R;[0 0 0]] [T;1] ]; - R_sort = sort(abs(R(:))); - R( find( abs(R) < tolerance*min(R_sort(end-2:end)) ) ) = 0; - hdr.hist.new_affine = [ [R;[0 0 0]] [T;1] ]; - - if det(R) == 0 | ~isequal(R(find(R)), sum(R)') - msg = [char(10) char(10) ' Non-orthogonal rotation or shearing ']; - msg = [msg 'found inside the affine matrix' char(10)]; - msg = [msg ' in this NIfTI file. You have 3 options:' char(10) char(10)]; - msg = [msg ' 1. Using included ''reslice_nii.m'' program to reslice the NIfTI' char(10)]; - msg = [msg ' file. I strongly recommand this, because it will not cause' char(10)]; - msg = [msg ' negative effect, as long as you remember not to do slice' char(10)]; - msg = [msg ' time correction after using ''reslice_nii.m''.' char(10) char(10)]; - msg = [msg ' 2. Using included ''load_untouch_nii.m'' program to load image' char(10)]; - msg = [msg ' without applying any affine geometric transformation or' char(10)]; - msg = [msg ' voxel intensity scaling. This is only for people who want' char(10)]; - msg = [msg ' to do some image processing regardless of image orientation' char(10)]; - msg = [msg ' and to save data back with the same NIfTI header.' char(10) char(10)]; - msg = [msg ' 3. Increasing the tolerance to allow more distortion in loaded' char(10)]; - msg = [msg ' image, but I don''t suggest this.' 
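The orthogonality test above accepts an sform only when each column of R has a single non-zero entry; entries that are small relative to the three dominant ones are first zeroed using the tolerance. A self-contained sketch of that squashing step:

% Sketch: squash near-zero off-axis terms of R, as in change_hdr.
R = [3 0 0.05; 0 3 0; 0 0 3];   % nearly axis-aligned affine part (illustrative)
tolerance = 0.1;
R_sort = sort(abs(R(:)));
R(abs(R) < tolerance * min(R_sort(end-2:end))) = 0;
is_rectilinear = det(R) ~= 0 && isequal(R(R ~= 0), sum(R)')   % true after squashing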
char(10) char(10)]; - msg = [msg ' To get help, please type:' char(10) char(10) ' help reslice_nii.m' char(10)]; - msg = [msg ' help load_untouch_nii.m' char(10) ' help load_nii.m']; - error(msg); - end - end - - elseif isequal(useForm,'q') - b = hdr.hist.quatern_b; - c = hdr.hist.quatern_c; - d = hdr.hist.quatern_d; - - if 1.0-(b*b+c*c+d*d) < 0 - if abs(1.0-(b*b+c*c+d*d)) < 1e-5 - a = 0; - else - error('Incorrect quaternion values in this NIFTI data.'); - end - else - a = sqrt(1.0-(b*b+c*c+d*d)); - end - - qfac = hdr.dime.pixdim(1); - if qfac==0, qfac = 1; end - i = hdr.dime.pixdim(2); - j = hdr.dime.pixdim(3); - k = qfac * hdr.dime.pixdim(4); - - R = [a*a+b*b-c*c-d*d 2*b*c-2*a*d 2*b*d+2*a*c - 2*b*c+2*a*d a*a+c*c-b*b-d*d 2*c*d-2*a*b - 2*b*d-2*a*c 2*c*d+2*a*b a*a+d*d-c*c-b*b]; - - T = [hdr.hist.qoffset_x - hdr.hist.qoffset_y - hdr.hist.qoffset_z]; - - % qforms are expected to generate rotation matrices R which are - % det(R) = 1; we'll make sure that happens. - % - % now we make the same checks as were done above for sform data - % BUT we do it on a transform that is in terms of voxels not mm; - % after we figure out the angles and squash them to closest - % rectilinear direction. After that, the voxel sizes are then - % added. - % - % This part is modified by Jeff Gunter. - % - if det(R) == 0 | ~isequal(R(find(R)), sum(R)') - - % det(R) == 0 is not a common trigger for this --- - % R(find(R)) is a list of non-zero elements in R; if that - % is straight (not oblique) then it should be the same as - % columnwise summation. Could just as well have checked the - % lengths of R(find(R)) and sum(R)' (which should be 3) - % - hdr.hist.old_affine = [ [R * diag([i j k]);[0 0 0]] [T;1] ]; - R_sort = sort(abs(R(:))); - R( find( abs(R) < tolerance*min(R_sort(end-2:end)) ) ) = 0; - R = R * diag([i j k]); - hdr.hist.new_affine = [ [R;[0 0 0]] [T;1] ]; - - if det(R) == 0 | ~isequal(R(find(R)), sum(R)') - msg = [char(10) char(10) ' Non-orthogonal rotation or shearing ']; - msg = [msg 'found inside the affine matrix' char(10)]; - msg = [msg ' in this NIfTI file. You have 3 options:' char(10) char(10)]; - msg = [msg ' 1. Using included ''reslice_nii.m'' program to reslice the NIfTI' char(10)]; - msg = [msg ' file. I strongly recommand this, because it will not cause' char(10)]; - msg = [msg ' negative effect, as long as you remember not to do slice' char(10)]; - msg = [msg ' time correction after using ''reslice_nii.m''.' char(10) char(10)]; - msg = [msg ' 2. Using included ''load_untouch_nii.m'' program to load image' char(10)]; - msg = [msg ' without applying any affine geometric transformation or' char(10)]; - msg = [msg ' voxel intensity scaling. This is only for people who want' char(10)]; - msg = [msg ' to do some image processing regardless of image orientation' char(10)]; - msg = [msg ' and to save data back with the same NIfTI header.' char(10) char(10)]; - msg = [msg ' 3. Increasing the tolerance to allow more distortion in loaded' char(10)]; - msg = [msg ' image, but I don''t suggest this.' 
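The qform branch above rebuilds the rotation matrix from the quaternion parameters (b, c, d), recovering a from the unit-norm constraint, and then scales the columns by the voxel sizes (with qfac applied to the third axis). A self-contained sketch with illustrative values:

% Sketch: quaternion-to-rotation-matrix step of change_hdr.
b = 0; c = 0; d = sin(pi/4);                 % quaternion parameters from the header
a = sqrt(max(0, 1 - (b*b + c*c + d*d)));     % recover a; guards tiny negative values
R = [a*a+b*b-c*c-d*d,  2*b*c-2*a*d,      2*b*d+2*a*c
     2*b*c+2*a*d,      a*a+c*c-b*b-d*d,  2*c*d-2*a*b
     2*b*d-2*a*c,      2*c*d+2*a*b,      a*a+d*d-c*c-b*b];   % det(R) == 1
pixdim = [1 2 2 2.4];                        % [qfac dx dy dz], illustrative
R = R * diag(pixdim(2:4) .* [1 1 pixdim(1)]);   % voxel sizes; qfac flips z if -1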
char(10) char(10)]; - msg = [msg ' To get help, please type:' char(10) char(10) ' help reslice_nii.m' char(10)]; - msg = [msg ' help load_untouch_nii.m' char(10) ' help load_nii.m']; - error(msg); - end - - else - R = R * diag([i j k]); - end % 1st det(R) - - else - affine_transform = 0; % no sform or qform transform - end - - if affine_transform == 1 - voxel_size = abs(sum(R,1)); - inv_R = inv(R); - originator = inv_R*(-T)+1; - orient = get_orient(inv_R); - - % modify pixdim and originator - % - hdr.dime.pixdim(2:4) = voxel_size; - hdr.hist.originator(1:3) = originator; - - % set sform or qform to non-use, because they have been - % applied in xform_nii - % - hdr.hist.qform_code = 0; - hdr.hist.sform_code = 0; - end - - % apply space_unit to pixdim if not 1 (mm) - % - space_unit = get_units(hdr); - - if space_unit ~= 1 - hdr.dime.pixdim(2:4) = hdr.dime.pixdim(2:4) * space_unit; - - % set space_unit of xyzt_units to millimeter, because - % voxel_size has been re-scaled - % - hdr.dime.xyzt_units = char(bitset(hdr.dime.xyzt_units,1,0)); - hdr.dime.xyzt_units = char(bitset(hdr.dime.xyzt_units,2,1)); - hdr.dime.xyzt_units = char(bitset(hdr.dime.xyzt_units,3,0)); - end - - hdr.dime.pixdim = abs(hdr.dime.pixdim); - - return; % change_hdr - - -%----------------------------------------------------------------------- -function orient = get_orient(R) - - orient = []; - - for i = 1:3 - switch find(R(i,:)) * sign(sum(R(i,:))) - case 1 - orient = [orient 1]; % Left to Right - case 2 - orient = [orient 2]; % Posterior to Anterior - case 3 - orient = [orient 3]; % Inferior to Superior - case -1 - orient = [orient 4]; % Right to Left - case -2 - orient = [orient 5]; % Anterior to Posterior - case -3 - orient = [orient 6]; % Superior to Inferior - end - end - - return; % get_orient - - -%----------------------------------------------------------------------- -function [space_unit, time_unit] = get_units(hdr) - - switch bitand(hdr.dime.xyzt_units, 7) % mask with 0x07 - case 1 - space_unit = 1e+3; % meter, m - case 3 - space_unit = 1e-3; % micrometer, um - otherwise - space_unit = 1; % millimeter, mm - end - - switch bitand(hdr.dime.xyzt_units, 56) % mask with 0x38 - case 16 - time_unit = 1e-3; % millisecond, ms - case 24 - time_unit = 1e-6; % microsecond, us - otherwise - time_unit = 1; % second, s - end - - return; % get_units - diff --git a/FakeData/FD.mat b/FakeData/FD.mat deleted file mode 100644 index 9c9b8e7..0000000 Binary files a/FakeData/FD.mat and /dev/null differ diff --git a/FakeData/TC.mat b/FakeData/TC.mat deleted file mode 100644 index 3f4a136..0000000 Binary files a/FakeData/TC.mat and /dev/null differ diff --git a/FakeData/brain_info.mat b/FakeData/brain_info.mat deleted file mode 100644 index 0500ea0..0000000 Binary files a/FakeData/brain_info.mat and /dev/null differ diff --git a/FakeData/mask.mat b/FakeData/mask.mat deleted file mode 100644 index 6078006..0000000 Binary files a/FakeData/mask.mat and /dev/null differ diff --git a/FakeData/seed1.mat b/FakeData/seed1.mat deleted file mode 100644 index fa06e6e..0000000 Binary files a/FakeData/seed1.mat and /dev/null differ diff --git a/FakeData/seed2.mat b/FakeData/seed2.mat deleted file mode 100644 index 65b7701..0000000 Binary files a/FakeData/seed2.mat and /dev/null differ diff --git a/FakeData/seed3.mat b/FakeData/seed3.mat deleted file mode 100644 index 1d2f164..0000000 Binary files a/FakeData/seed3.mat and /dev/null differ diff --git a/Plotting/Create_CAP_colorbar.m b/Plotting/Create_CAP_colorbar.m deleted file mode 100755 index 
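get_units above decodes the packed xyzt_units byte: the low three bits give the spatial unit, the next three the temporal unit. A self-contained sketch of the decoding:

% Sketch: decode xyzt_units as get_units does.
xyzt_units = 2 + 8;                        % NIfTI code for millimetres + seconds
space_code = bitand(xyzt_units, 7);        % mask 0x07 -> 2 (mm)
time_code  = bitand(xyzt_units, 56);       % mask 0x38 -> 8 (s)
% get_units maps space_code 1 -> 1e+3 (m), 3 -> 1e-3 (um), otherwise 1 (mm),
% and time_code 16 -> 1e-3 (ms), 24 -> 1e-6 (us), otherwise 1 (s).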
be85c38..0000000 --- a/Plotting/Create_CAP_colorbar.m +++ /dev/null @@ -1,56 +0,0 @@ -%% Creates a nice looking colorbar for the CAP display -function [Ah,h] = Create_CAP_colorbar(absmin,absmax,absstep,colT,lab,Ah,Ori,CB1,CB2,n_steps) - - H_range = [absmin absmax]; % The colormap is symmetric around zero - - % Set the Min/Max T-values for alpha coding - A_range = [0 1]; - - % Set the labels for the colorbar - hue_label = lab; - - colrange = linspace(absmin,absmax,256); - - switch Ori - case 'Horizontal' - - y = linspace(A_range(1), A_range(2), 256); - % x represents the range in alpha (abs(t-stats)) - x = linspace(H_range(1), H_range(2), 256); - % y represents the range in hue (beta weight difference) - [X,Y] = meshgrid(x,y); % Transform into a 2D matrix - - h=imagesc(x,y,X,'Parent',Ah); - axis(Ah,'xy'); % Plot the colorbar - set(Ah, 'Xcolor', 'k', 'Ycolor', 'k','YTickLabel','','YTick',[],'XTick',absmin:absstep:absmax,'FontSize',8); - set(Ah, 'XAxisLocation', 'bottom'); - xlabel(Ah,hue_label,'FontSize',8); - - A = ones(size(X)); - A(abs(X) < colT) = 0; - A = reshape(A,256,256); - - case 'Vertical' - - x = linspace(A_range(1), A_range(2), 256); - % x represents the range in alpha (abs(t-stats)) - y = linspace(H_range(1), H_range(2), 256); - % y represents the range in hue (beta weight difference) - [X,Y] = meshgrid(x,y); % Transform into a 2D matrix - - h=imagesc(x,y,Y,'Parent',Ah); - axis(Ah,'xy'); % Plot the colorbar - set(Ah, 'Xcolor', 'k', 'Ycolor', 'k','XTickLabel','','XTick',[],'YTick',absmin:absstep:absmax,'FontSize',8); - set(Ah, 'YAxisLocation', 'right'); - ylabel(Ah,hue_label,'FontSize',8); - - A = ones(size(Y)); - A(abs(Y) < colT) = 0; - A = reshape(A,256,256); - end - - tmp_cmap = cbrewer(CB1,CB2,n_steps); - colormap(Ah,flipud(tmp_cmap)); - - set(h,'AlphaData',A); -end \ No newline at end of file diff --git a/Plotting/MakeViolin.m b/Plotting/MakeViolin.m deleted file mode 100755 index d7adf60..0000000 --- a/Plotting/MakeViolin.m +++ /dev/null @@ -1,55 +0,0 @@ -%% Makes a violin plot with overlapped boxplot, tuning colors -% Y and O are vectors containing the data to plot for the two conditions -% Color has light colors for background (first cell) and dark colors for -% average (second cell), for maximum 4 populations -function [box,h_viol,ah] = MakeViolin(Y,ah,Lab,YLabel,Color,n_pop,n_states) - - % Range of the plot - Max = max(max((Y))); - Min = min(min((Y))); - Max = Max + 0.15*max(abs(Min),Max); - Min = Min - 0.15*max(abs(Min),Max); - - % Plots the distribution - ylim(ah,[Min,Max]); - - % Colors for the background - Col_final = num2cell(Color{1},2); - Col_final = Col_final(1:n_pop); - Col_final = repmat(Col_final,n_states,1); - - % Colors for the foreground - Col_final2 = num2cell(Color{2},2); - Col_final2 = Col_final2(1:n_pop); - Col_final2 = repmat(Col_final2,n_states,1); - - Lab_final = {}; - - for i = 1:n_states - Lab_final = [Lab_final, repmat({Lab{i}},1,n_pop)]; - end - - h_viol = distributionPlot(ah,Y','showMM',0,'color',Col_final,'yLabel',YLabel,'xNames',Lab_final); - hold(ah,'on'); - box = boxplot(Y','Parent',ah,'Labels',Lab_final); - - h_box = findobj(box,'Tag','Box'); - set(h_box,'color','k','LineWidth',2); - - h_median = findobj(box,'Tag','Median'); - h_outliers = findobj(box,'Tag','Outliers'); - - for i = 1:n_pop - for j = 1:n_states - - idx_oi = i+(j-1)*n_pop; - - set(h_median(idx_oi),'color',Col_final2{idx_oi},'LineWidth',2); - set(h_outliers(idx_oi),'Marker','o','MarkerFaceColor',Col_final2{n_pop*n_states-idx_oi+1},... 
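Create_CAP_colorbar above hides sub-threshold values by zeroing the AlphaData of the colorbar image instead of editing the colormap. The core of that trick, as a self-contained sketch:

% Sketch: make colorbar entries below a threshold fully transparent.
absmin = -3; absmax = 3; colT = 1;         % display range and threshold (illustrative)
vals = linspace(absmin, absmax, 256);
[X, ~] = meshgrid(vals, vals);
figure; h = imagesc(vals, linspace(0, 1, 256), X);
axis xy;
A = ones(size(X));
A(abs(X) < colT) = 0;                      % transparent below the threshold
set(h, 'AlphaData', A);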
- 'MarkerEdgeColor',Col_final2{idx_oi},'MarkerSize',3); - - end - end - - set(ah,'Box','off'); -end \ No newline at end of file diff --git a/Plotting/cbrewer/cbrewer.m b/Plotting/cbrewer/cbrewer.m deleted file mode 100644 index 26be891..0000000 --- a/Plotting/cbrewer/cbrewer.m +++ /dev/null @@ -1,128 +0,0 @@ -function [colormap]=cbrewer(ctype, cname, ncol, interp_method) -% -% CBREWER - This function produces a colorbrewer table (rgb data) for a -% given type, name and number of colors of the colorbrewer tables. -% For more information on 'colorbrewer', please visit -% http://colorbrewer2.org/ -% -% The tables were generated from an MS-Excel file provided on the website -% http://www.personal.psu.edu/cab38/ColorBrewer/ColorBrewer_updates.html -% -% -% [colormap]=cbrewer(ctype, cname, ncol, interp_method) -% -% INPUT: -% - ctype: type of color table 'seq' (sequential), 'div' (diverging), 'qual' (qualitative) -% - cname: name of colortable. It changes depending on ctype. -% - ncol: number of color in the table. It changes according to ctype and -% cname -% - interp_method: interpolation method (see interp1.m). Default is "cubic" ) -% -% A note on the number of colors: Based on the original data, there is -% only a certain number of colors available for each type and name of -% colortable. When 'ncol' is larger then the maximum number of colors -% originally given, an interpolation routine is called (interp1) to produce -% the "extended" colormaps. -% -% Example: To produce a colortable CT of ncol X 3 entries (RGB) of -% sequential type and named 'Blues' with 8 colors: -% CT=cbrewer('seq', 'Blues', 8); -% To use this colortable as colormap, simply call: -% colormap(CT) -% -% To see the various colormaps available according to their types and -% names, simply call: cbrewer() -% -% This product includes color specifications and designs developed by -% Cynthia Brewer (http://colorbrewer.org/). -% -% Author: Charles Robert -% email: tannoudji@hotmail.com -% Date: 06.12.2011 -% ------------------------------ -% 18.09.2015 Minor fixes, fixed a bug where the 'spectral' color table did not appear in the preview - - -% load colorbrewer data -load('colorbrewer.mat') -% initialise the colormap is there are any problems -colormap=[]; -if (~exist('interp_method', 'var')) - interp_method='cubic'; -end - -% If no arguments -if (~exist('ctype', 'var') | ~exist('cname', 'var') | ~exist('ncol', 'var')) - disp(' ') - disp('[colormap] = cbrewer(ctype, cname, ncol [, interp_method])') - disp(' ') - disp('INPUT:') - disp(' - ctype: type of color table *seq* (sequential), *div* (divergent), *qual* (qualitative)') - disp(' - cname: name of colortable. It changes depending on ctype.') - disp(' - ncol: number of color in the table. It changes according to ctype and cname') - disp(' - interp_method: interpolation method (see interp1.m). Default is "cubic" )') - - disp(' ') - disp('Sequential tables:') - z={'Blues','BuGn','BuPu','GnBu','Greens','Greys','Oranges','OrRd','PuBu','PuBuGn','PuRd',... 
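MakeViolin above expects one row of Y per state/population combination (it transposes Y before handing it to distributionPlot and boxplot) and a two-cell Color argument holding light background and dark foreground rows. A hypothetical call, with argument shapes inferred from the header comment rather than from documented usage:

% Hypothetical example: one population, two states.
Y = [randn(1, 50); randn(1, 50) + 1];                 % one row per state
Colors = {[0.7 0.7 1; 1 0.7 0.7], [0 0 1; 1 0 0]};    % {light, dark} rows per population
figure; ah = gca;
MakeViolin(Y, ah, {'State 1', 'State 2'}, 'Duration (s)', Colors, 1, 2);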
- 'Purples','RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd', 'Spectral'}; - disp(z') - - disp('Divergent tables:') - z={'BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn'}; - disp(z') - - disp(' ') - disp('Qualitative tables:') - %getfield(colorbrewer, 'qual') - z={'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'}; - disp(z') - - plot_brewer_cmap - return -end - -% Verify that the input is appropriate -ctype_names={'div', 'seq', 'qual'}; -if (~ismember(ctype,ctype_names)) - disp('ctype must be either: *div*, *seq* or *qual*') - colormap=[]; - return -end - -if (~isfield(colorbrewer.(ctype),cname)) - disp(['The name of the colortable of type *' ctype '* must be one of the following:']) - getfield(colorbrewer, ctype) - colormap=[]; - return -end - -if (ncol>length(colorbrewer.(ctype).(cname))) -% disp(' ') -% disp('----------------------------------------------------------------------') -% disp(['The maximum number of colors for table *' cname '* is ' num2str(length(colorbrewer.(ctype).(cname)))]) -% disp(['The new colormap will be extrapolated from these ' num2str(length(colorbrewer.(ctype).(cname))) ' values']) -% disp('----------------------------------------------------------------------') -% disp(' ') - cbrew_init=colorbrewer.(ctype).(cname){length(colorbrewer.(ctype).(cname))}; - colormap=interpolate_cbrewer(cbrew_init, interp_method, ncol); - colormap=colormap./255; - return -end - -if (isempty(colorbrewer.(ctype).(cname){ncol})) - - while(isempty(colorbrewer.(ctype).(cname){ncol})) - ncol=ncol+1; - end - disp(' ') - disp('----------------------------------------------------------------------') - disp(['The minimum number of colors for table *' cname '* is ' num2str(ncol)]) - disp('This minimum value shall be defined as ncol instead') - disp('----------------------------------------------------------------------') - disp(' ') -end - -colormap=(colorbrewer.(ctype).(cname){ncol})./255; - -end \ No newline at end of file diff --git a/Plotting/cbrewer/cbrewer_preview.jpg b/Plotting/cbrewer/cbrewer_preview.jpg deleted file mode 100644 index bd2830a..0000000 Binary files a/Plotting/cbrewer/cbrewer_preview.jpg and /dev/null differ diff --git a/Plotting/cbrewer/change_jet.m b/Plotting/cbrewer/change_jet.m deleted file mode 100644 index b8d4ecb..0000000 --- a/Plotting/cbrewer/change_jet.m +++ /dev/null @@ -1,64 +0,0 @@ -% This script help produce a new 'jet'-like colormap based on other RGB reference colors - -% ------- I WAS ASKED --------------- -% "is there a chance that you could add a diverging map going from blue to green to red as in jet, -% but using the red and blue from your RdBu map and the third darkest green from your RdYlGn map?"" -% -% ANSWER: -% You should construct the new colormap based on the existing RGB values of 'jet' -% but projecting these RGB values on your new RGB basis. 
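As the branches above show, cbrewer returns the stored table (scaled to [0, 1]) when enough colors are available and falls back to interpolate_cbrewer when more colors are requested than the table provides. A short usage sketch:

CT8  = cbrewer('seq', 'Blues', 8);    % direct lookup: 8 x 3 RGB values in [0, 1]
CT64 = cbrewer('seq', 'Blues', 64);   % more colors than stored: interpolated table
colormap(CT64);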
-% ----------------------------------- - -% load colormaps -jet=colormap('jet'); -RdBu=cbrewer('div', 'RdBu', 11); -RdYlGn=cbrewer('div', 'RdYlGn', 11); - -% Define the new R, G, B references (p stands for prime) -Rp=RdBu(1,:); -Bp=RdBu(end, :); -Gp=RdYlGn(end-2, :); -RGBp=[Rp;Gp;Bp]; - -% construct the new colormap based on the existing RGB values of jet -% Project the RGB values on your new basis -newjet = jet*RGBp; - -% store data in a strcuture, easier to handle -cmap.jet=jet; -cmap.newjet=newjet; -cnames={'jet', 'newjet'}; - -% plot the RGB values -fh=figure(); -colors={'r', 'g', 'b'}; -for iname=1:length(cnames) - subplot(length(cnames),1,iname) - dat=cmap.(cnames{end-iname+1}); - for icol=1:size(dat,2) - plot(dat(:,icol), 'color', colors{icol}, 'linewidth', 2);hold on; - end % icol - title([' "' cnames{end-iname+1} '" in RGB plot']) -end - -% plot the colormaps -fh=figure(); -for iname=1:length(cnames) - F=cmap.(cnames{iname}); - ncol=length(F); - fg=1./ncol; % geometrical factor - X=fg.*[0 0 1 1]; - Y=0.1.*[1 0 0 1]+(2*iname-1)*0.1; - - for icol=1:ncol - X2=X+fg.*(icol-1); - fill(X2,Y,F(icol, :), 'linestyle', 'none') - hold all - end % icol - text(-0.1, mean(Y), cnames{iname}, 'HorizontalAlignment', 'right', 'FontWeight', 'bold', 'FontSize', 10, 'FontName' , 'AvantGarde') - xlim([-0.4, 1]) - axis off - set(gcf, 'color', [1 1 1]) - ylim([0.1 1.05.*max(Y)]); - end % iname - diff --git a/Plotting/cbrewer/colorbrewer.mat b/Plotting/cbrewer/colorbrewer.mat deleted file mode 100644 index ec59ef4..0000000 Binary files a/Plotting/cbrewer/colorbrewer.mat and /dev/null differ diff --git a/Plotting/cbrewer/interpolate_cbrewer.m b/Plotting/cbrewer/interpolate_cbrewer.m deleted file mode 100644 index e8b5e21..0000000 --- a/Plotting/cbrewer/interpolate_cbrewer.m +++ /dev/null @@ -1,36 +0,0 @@ -function [interp_cmap]=interpolate_cbrewer(cbrew_init, interp_method, ncolors) -% -% INTERPOLATE_CBREWER - interpolate a colorbrewer map to ncolors levels -% -% INPUT: -% - cbrew_init: the initial colormap with format N*3 -% - interp_method: interpolation method, which can be the following: -% 'nearest' - nearest neighbor interpolation -% 'linear' - bilinear interpolation -% 'spline' - spline interpolation -% 'cubic' - bicubic interpolation as long as the data is -% uniformly spaced, otherwise the same as 'spline' -% - ncolors=desired number of colors -% -% Author: Charles Robert -% email: tannoudji@hotmail.com -% Date: 14.10.2011 - - -% just to make sure, in case someone puts in a decimal -ncolors=round(ncolors); - -% How many data points of the colormap available -nmax=size(cbrew_init,1); - -% create the associated X axis (using round to get rid of decimals) -a=(ncolors-1)./(nmax-1); -X=round([0 a:a:(ncolors-1)]); -X2=0:ncolors-1; - -z=interp1(X,cbrew_init(:,1),X2,interp_method); -z2=interp1(X,cbrew_init(:,2),X2,interp_method); -z3=interp1(X,cbrew_init(:,3),X2, interp_method); -interp_cmap=round([z' z2' z3']); - -end \ No newline at end of file diff --git a/Plotting/cbrewer/plot_brewer_cmap.m b/Plotting/cbrewer/plot_brewer_cmap.m deleted file mode 100644 index a5cab9e..0000000 --- a/Plotting/cbrewer/plot_brewer_cmap.m +++ /dev/null @@ -1,50 +0,0 @@ -% Plots and identifies the various colorbrewer tables available. -% Is called by cbrewer.m when no arguments are given. 
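interpolate_cbrewer above stretches an N x 3 colormap to ncolors rows by interpolating each RGB channel separately over a resampled index axis. The same idea as a self-contained sketch (using 'pchip' in place of the deleted code's default 'cubic' method):

% Sketch: expand an N x 3 colormap to ncolors rows by per-channel interpolation.
cmap_in = [1 0 0; 1 1 1; 0 0 1];             % illustrative 3-color map (red-white-blue)
ncolors = 64;
nmax = size(cmap_in, 1);
X  = round(linspace(0, ncolors - 1, nmax));  % positions of the original rows
X2 = 0:ncolors - 1;                          % target positions
cmap_out = zeros(ncolors, 3);
for ch = 1:3
    cmap_out(:, ch) = interp1(X, cmap_in(:, ch), X2, 'pchip');
end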
-% -% Author: Charles Robert -% email: tannoudji@hotmail.com -% Date: 14.10.2011 - - - -load('colorbrewer.mat') - -ctypes={'div', 'seq', 'qual'}; -ctypes_title={'Diverging', 'Sequential', 'Qualitative'}; -cnames{1,:}={'BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral'}; -cnames{2,:}={'Blues','BuGn','BuPu','GnBu','Greens','Greys','Oranges','OrRd','PuBu','PuBuGn','PuRd',... - 'Purples','RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'}; -cnames{3,:}={'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'}; - -figure('position', [314 327 807 420]) -for itype=1:3 - - %fh(itype)=figure(); - subplot(1,3,itype) - - for iname=1:length(cnames{itype,:}) - - ncol=length(colorbrewer.(ctypes{itype}).(cnames{itype}{iname})); - fg=1./ncol; % geometrical factor - - X=fg.*[0 0 1 1]; - Y=0.1.*[1 0 0 1]+(2*iname-1)*0.1; - F=cbrewer(ctypes{itype}, cnames{itype}{iname}, ncol); - - for icol=1:ncol - X2=X+fg.*(icol-1); - fill(X2,Y,F(icol, :), 'linestyle', 'none') - text(-0.1, mean(Y), cnames{itype}{iname}, 'HorizontalAlignment', 'right', 'FontWeight', 'bold', 'FontSize', 10, 'FontName' , 'AvantGarde') - xlim([-0.4, 1]) - hold all - end % icol - %set(gca, 'box', 'off') - title(ctypes_title{itype}, 'FontWeight', 'bold', 'FontSize', 16, 'FontName' , 'AvantGarde') - axis off - set(gcf, 'color', [1 1 1]) - end % iname - ylim([0.1 1.05.*max(Y)]); -end %itype - -set(gcf, 'MenuBar', 'none') -set(gcf, 'Name', 'ColorBrewer Color maps') \ No newline at end of file diff --git a/Plotting/distributionPlot.m b/Plotting/distributionPlot.m deleted file mode 100755 index 352cc69..0000000 --- a/Plotting/distributionPlot.m +++ /dev/null @@ -1,902 +0,0 @@ -function handles = distributionPlot(varargin) -%DISTRIBUTIONPLOT creates violin plots for convenient visualization of multiple distributions -% -% SYNOPSIS: handles = distributionPlot(data,propertyName,propertyValue,...) -% handles = distributionPlot(ah,...) -% -% INPUT data : m-by-nData array of values, or vector of grouped data (use -% the 'groups' property to specify the grouping variable), or -% cell array of length nData. -% The cell array can either contain vectors with values, or -% m-by-2 arrays with [bins,counts] if you want to determine the -% histograms by yourself (m can be different between cell -% elements). Note that arrays inside cells with any -% other shape than m-by-2 are reshaped to vector an a warning is -% thrown (DISTRIBUTIONPLOT:AUTORESHAPE). -% -% DISTRIBUTIONPLOT accepts the following propertyName/propertyValue -% pairs (all are optional): -% -% distWidth : width of distributions; ideally between 0 and 1. -% 1 means that adjacent distributions might touch. Default: 0.9 -% variableWidth : If true, the width of the distribution changes, -% reflecting the shape of the histogram of the data. If false, -% the distribution is only encoded by color levels. Default: true -% color : uniform coloring of histograms. Supply either a color -% string ('r'), or a truecolor vector ([1 0 0]). Use a -% cell array of length nData to specify one color per -% distribution. Default: 'k' -% If variableWidth is set to false, a colormap is generated that -% goes from white to the chose color (or from black, if -% invert==true). -% If both 'color', and 'colormap' are specified, 'colormap' takes -% precedence. 
-% colormap : colormap used to describe the distribution (first row -% corresponds to bins with least data, last row corresponds to -% bins with most data (invert the grayscale colormap to have -% black indicate the most data). -% Supply a cell array of length nData to color distributions -% individually. Note that using multiple colormaps means that -% the colorbar doesn't contain much useful information. -% Default: [] -% Colormap will index into the figure colormap, which will be -% modified by distributionPlot. This is done to allow editing the -% distributions in e.g. Adobe Illustrator. -% If both 'color', and 'colormap' are specified, 'colormap' takes -% precedence. -% globalNorm : normalization for bin width (x-direction) -% 0 : every histogram is normalized individually so that the -% maximum bin width is equal to distWidth. This is best -% suited to comparing distribution shapes. Default. -% 1 : histograms are normalized such that equal bin width -% reports equal numbers of counts per bin. -% 2 : histograms are normalized so that the relative areas -% covered by the histograms reflect the relative total number -% of data points. -% 3 : histograms areas are normalized so that relative densities -% are the same across histograms. Thus, if -% data = {rand(100,1),rand(500,1)}, -% then -% distributionPlot(data,'globalNorm',2,'histOpt',0,'divFactor',10) -% shows the left histogram 5x as wide as the right, while -% distributionPlot(data,'globalNorm',3,'histOpt',0,'divFactor',10) -% displays both histograms equally wide, since each bin -% contains ~10% of the data. -% Options 1 and 2 produce similar results if the bins are spaced -% equally for the distributions. Options 0 and 3 produce similar -% results if the data are drawn from the same distributions. -% Note that colormaps currently always report the number of data -% points per bin; 'globalNorm' only applies to the distribution -% shape. -% -% groups : grouping variable for grouped data. Grouping will be -% resolved by calling grp2idx, and unless xNames have -% been supplied, group names determine the x-labels. -% If the grouping variable is numeric, group labels also -% determine x-values, unless the parameter xValues has -% been specified. -% histOpt : histogram type to plot -% 0 : use hist command (no smoothing, fixed number of -% bins) -% 1 : smoothened histogram using ksdensity with -% Normal kernel. Default. -% 1.1: smoothened histogram using ksdensity where the -% kernel is robustly estimated via histogram.m. -% Normal kernel. -% 2 : histogram command (no smoothing, automatic -% determination of thickness (y-direction) of bins) -% divFactor : Parameter dependent on histOpt. If... -% histOpt == 0: divFactor = # of bins. Default: 25. -% Alternatively, pass a vector which will be -% interpreted as bin centers. -% histOpt == 1: divFactor decides by how much the default -% kernel-width is multiplied in order to avoid an -% overly smooth histogram. Default: 1/2 -% histOpt == 2: divFactor decides by how much the -% automatic bin width is multiplied in order to have -% more (<1) or less (>1) detail. Default: 1 -% addSpread : if 1, data points are plotted with plotSpread. -% distWidth is ideally set to 0.95 -% This option is not available if the data is supplied as -% histograms. -% Please download plotSpread.m separately from the File -% Exchange using the link in the remarks -% showMM : if 1, mean and median are shown as red crosses and -% green squares, respectively. 
This is the default -% 2: only mean -% 3: only median -% 4: mean +/- standard error of the mean (no median) -% 5: mean +/- standard deviation (no median) -% 6: draw lines at the 25,50,75 percentiles (no mean) -% 0: plot neither mean nor median -% xValues: x-coordinate where the data should be plotted. -% If xValues are given, "distWidth" is scaled by the median -% difference between adjacent (sorted) x-values. Note that -% this may lead to overlapping distributions. Default: -% 1:nData -% xNames : cell array of length nData containing x-tick names -% (instead of the default '1,2,3') -% xMode : if 'auto', x-ticks are spaced automatically. If 'manual', -% there is a tick for each distribution. If xNames is -% provided as input, xMode is forced to 'manual'. Default: -% 'manual'. -% NOTE: SPECIFYING XNAMES OR XVALUES OR XMODE WILL ERASE PREVIOUS -% LABELS IF PLOTTING INTO EXISTING AXES -% yLabel : string with label for y-axis. Default : '' -% If empty and data is histograms, ylabel is set to 'counts' -% invert : if 1, axes color is changed to black, and colormap is -% inverted. -% histOri: Orientation of histogram. Either 'center', 'left', or -% 'right'. With 'left' or 'right', the left or right half of -% the standard violin plot is shown. Has no effect if -% variableWidth is false. Default: center -% xyOri : orientation of axes. Either 'normal' (=default), or -% 'flipped'. If 'flipped', the x-and y-axes are switched, so -% that violin plots are horizontal. Consequently, -% axes-specific properties, such as 'yLabel' are applied to -% the other axis. -% widthDiv : 1-by-2 array with [numberOfDivisions,currentDivision] -% widthDiv allows cutting the stripe dedicated to a single -% distribution into multible bands, which can be filled with -% sequential calls to distributionPlot. This is one way -% to compare two (or more) sequences of distributions. See -% example below. -% ah : axes handle to plot the distributions. 
Default: gca -% -% OUTPUT handles : 1-by-4 cell array with patch-handles for the -% distributions, plot handles for mean/median, the -% axes handle, and the plotSpread-points handle -% -% -% EXAMPLES -% %--Distributions contain more information than boxplot can capture -% r = rand(1000,1); -% rn = randn(1000,1)*0.38+0.5; -% rn2 = [randn(500,1)*0.1+0.27;randn(500,1)*0.1+0.73]; -% rn2=min(rn2,1);rn2=max(rn2,0); -% figure -% ah(1)=subplot(3,4,1:2); -% boxplot([r,rn,rn2]) -% ah(2)=subplot(3,4,3:4); -% distributionPlot([r,rn,rn2],'histOpt',2); % histOpt=2 works better for uniform distributions than the default -% set(ah,'ylim',[-1 2]) -% -% %--- additional options -% -% data = [randn(100,1);randn(50,1)+4;randn(25,1)+8]; -% subplot(3,4,5) -% -% %--- defaults -% distributionPlot(data); -% subplot(3,4,6) -% -% %--- show density via custom colormap only, show mean/std, -% distributionPlot(data,'colormap',copper,'showMM',5,'variableWidth',false) -% subplot(3,4,7:8) -% -% %--- auto-binwidth depends on # of datapoints; for small n, plotting the data is useful -% % note that this option requires the additional installation -% % of plotSpread from the File Exchange (link below) -% distributionPlot({data(1:5:end),repmat(data,2,1)},'addSpread',true,'showMM',false,'histOpt',2) -% -% %--- show quantiles -% subplot(3,4,9),distributionPlot(randn(100,1),'showMM',6) -% -% %--- horizontal orientation -% subplot(3,4,10:11), -% distributionPlot({chi2rnd(3,1000,1),chi2rnd(5,1000,1)},'xyOri','flipped','histOri','right','showMM',0), -% xlim([-3 13]) -% -% %--- compare distributions side-by-side (see also example below) -% % plotting into specified axes will throw a warning that you can -% % turn off using " warning off DISTRIBUTIONPLOT:ERASINGLABELS " -% ah = subplot(3,4,12); -% subplot(3,4,12),distributionPlot(chi2rnd(3,1000,1),'histOri','right','color','r','widthDiv',[2 2],'showMM',0) -% subplot(3,4,12),distributionPlot(chi2rnd(5,1000,1),'histOri','left','color','b','widthDiv',[2 1],'showMM',0) -% -% %--Use globalNorm to generate meaningful colorbar -% data = {randn(100,1),randn(500,1)}; -% figure -% distributionPlot(data,'globalNorm',true,'colormap',1-gray(64),'histOpt',0,'divFactor',[-5:0.5:5]) -% colorbar -% -% %--Use widthDiv to compare two series of distributions -% data1 = randn(500,5); -% data2 = bsxfun(@plus,randn(500,5),0:0.1:0.4); -% figure -% distributionPlot(data1,'widthDiv',[2 1],'histOri','left','color','b','showMM',4) -% distributionPlot(gca,data2,'widthDiv',[2 2],'histOri','right','color','k','showMM',4) -% -% %--Christmas trees! -% x=meshgrid(1:10,1:10); -% xx = tril(x); -% xx = xx(xx>0); -% figure -% hh=distributionPlot({xx,xx,xx},'color','g','addSpread',1,'histOpt',2,'showMM',0); -% set(hh{4}{1},'color','r','marker','o') -% END -% -% REMARKS To show distributions as clouds of points (~beeswarm plot), -% and/or to use the option "addSpread", please download the -% additional function plotSpread.m from the File Exchange -% http://www.mathworks.com/matlabcentral/fileexchange/37105-plot-spread-points-beeswarm-plot -% -% I used to run ksdensity with the Epanechnikov kernel. However, -% for integer data, the shape of the kernel can produce peaks -% between the integers, which is not ideal (use histOpt=2 for -% integer valued data). -% -% A previous iteration of distributionPlot used the input -% specifications below. They still work to ensure backward -% compatibility, but are no longer supported or updated. 
-% handles = distributionPlot(data,distWidth,showMM,xNames,histOpt,divFactor,invert,addSpread,globalNorm) -% where distWidth of 1 means that the maxima -% of two adjacent distributions might touch. Negative numbers -% indicate that the distributions should have constant width, i.e -% the density is only expressed through greylevels. -% Values between 1 and 2 are like values between 0 and 1, except -% that densities are not expressed via graylevels. Default: 1.9 -% -% -% SEE ALSO histogram, ksdensity, plotSpread, boxplot, grp2idx -% - -% created with MATLAB ver.: 7.6.0.324 (R2008a) on Windows_NT -% -% created by: Jonas Dorn; jonas.dorn@gmail.com -% DATE: 08-Jul-2008 -% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%==================================== -%% TEST INPUT -%==================================== - -% set defaults -def.xNames = []; -def.showMM = 1; -def.distWidth = 0.9; -def.histOpt = 1; -def.divFactor = [25,2,1]; -def.invert = false; -def.colormap = []; -def.color = 'k'; -def.addSpread = false; -def.globalNorm = false; -def.variableWidth = true; -def.groups = []; -def.yLabel = ''; -def.xValues = ''; -def.xMode = 'manual'; -def.histOri = 'center'; -def.xyOri = 'normal'; -def.widthDiv = [1 1]; -isHistogram = false; %# this parameter is not set by input - - -if nargin == 0 || isempty(varargin{1}) - error('not enough input arguments') -end - -% check for axes handle -if ~iscell(varargin{1}) && isscalar(varargin{1}) == 1 && ... - ishandle(varargin{1}) && strcmp(get(varargin{1},'Type'),'axes') - ah = varargin{1}; - data = varargin{2}; - varargin(1:2) = []; - newAx = false; - - -else - ah = gca; - data = varargin{1}; - varargin(1) = []; - newAx = true; -end - -% check for current axes limits. Set NaN if the axes have no children -% yet - we need that in case we're building a complicated set of -% distributions -if ~isempty(get(ah,'children')) - xAxLim = xlim; - yAxLim = ylim; -else - [xAxLim,yAxLim] = deal([NaN NaN]); -end - -fh = get(ah,'Parent'); - -% check data. If not cell, convert -if ~iscell(data) - [nPoints,nData] = size(data); - data = mat2cell(data,nPoints,ones(nData,1)); -else - % get nData - data = data(:); - nData = length(data); - % make sure all are vectors - badCol = ~cellfun(@isvector,data) & ~cellfun(@isempty,data); - if any(badCol) - nCols = cellfun(@(x)(size(x,2)),data(badCol)); - if all(nCols==2) - % bins,counts - isHistogram = true; - else - warning('DISTRIBUTIONPLOT:AUTORESHAPE',... - 'Elements %s of the cell array are not vectors. They will be reshaped automatically',... 
- num2str(find(badCol)')); - data(badCol) = cellfun(@(x)(x(:)),data(badCol),'UniformOutput',false); - end - end -end - -parserObj = inputParser; -parserObj.FunctionName = 'distributionPlot'; -stdWidth = 1; % scaling parameter for variableWidth with uneven x-values -% check whether we're dealing with pN/pV or straight arguments -if ~isempty(varargin) && ~ischar(varargin{1}) && ~isstruct(varargin{1}) - % use old format - % distWidth,showMM,xNames,histOpt,divFactor,invert,addSpread,globalNorm - def.distWidth = 1.9; - parserObj.addOptional('distWidth',def.distWidth); - parserObj.addOptional('showMM',def.showMM); - parserObj.addOptional('xNames',def.xNames); - parserObj.addOptional('histOpt',def.histOpt); - parserObj.addOptional('divFactor',def.divFactor); - parserObj.addOptional('invert',def.invert); - parserObj.addOptional('addSpread',def.addSpread); - parserObj.addOptional('globalNorm',def.globalNorm); - parserObj.addOptional('groups',def.groups); - parserObj.addOptional('yLabel',def.yLabel); - parserObj.addOptional('color',def.color); - - - parserObj.parse(varargin{:}); - opt = parserObj.Results; - % fill in defaults that are not supported in the old version of the - % code - opt.colormap = []; - opt.variableWidth = true; - opt.histOri = 'center'; - opt.xValues = []; - opt.xMode = 'auto'; - opt.xyOri = 'normal'; - opt.widthDiv = [1 1]; - - % overwrite empties with defaults - inputParser considers empty to be a - % valid input. - fnList = fieldnames(opt); - for fn = fnList' - if isempty(opt.(fn{1})) - opt.(fn{1}) = def.(fn{1}); - end - end - - - % fix a few parameters - if opt.distWidth > 1 - opt.distWidth = opt.distWidth - 1; - else - opt.colormap = 1-gray(128); - end - if opt.distWidth < 0 - opt.variableWidth = false; - opt.distWidth = abs(opt.distWidth); - end - - if ~isempty(opt.xNames) - opt.xMode = 'manual'; - end - - -else - defNames = fieldnames(def); - for dn = defNames(:)' - parserObj.addParamValue(dn{1},def.(dn{1})); - end - - - parserObj.parse(varargin{:}); - opt = parserObj.Results; - - % if groups: deal with data - if ~isempty(opt.groups) - [idx,labels,vals] = grp2idx(opt.groups); - % convert data to cell array - data = accumarray(idx,data{1},[],@(x){x}); - nData = length(data); - % if not otherwise provided, use group labels for xnames - if isempty(opt.xNames) - opt.xNames = labels; - if ~iscell(opt.xNames) - opt.xNames = num2cell(opt.xNames); - end - end - if isnumeric(vals) && isempty(opt.xValues) - opt.xValues = vals; - end - - end - - if ~ischar(opt.xyOri) || ~any(ismember(opt.xyOri,{'normal','flipped'})) - error('option xyOri must be either ''normal'' or ''flipped'' (is ''%s'')',opt.xyOri); - end - - - - -end -% common checks - -% default x-values: 1:n -if isempty(opt.xValues) - opt.xValues = 1:nData; -elseif length(opt.xValues) ~= nData - error('please supply as many x-data values as there are data entries') -elseif length(opt.xValues) > 1 % only check for scale if more than 1 value - % scale width - stdWidth = median(diff(sort(opt.xValues))); - opt.distWidth = opt.distWidth * stdWidth; -end - -if ~isscalar(opt.divFactor) && length(opt.divFactor) == 3 && all(opt.divFactor==def.divFactor) - opt.divFactor = opt.divFactor(floor(opt.histOpt)+1); -end -if isHistogram - opt.histOpt = 99; - if isempty(opt.yLabel) - opt.yLabel = 'counts'; - end -end - - - -% check colors/colormaps: do we need to expand colormap? 
-if ~iscell(opt.colormap) - opt.colormap = {opt.colormap}; -end -if ~iscell(opt.color) - opt.color = {opt.color}; -end -for iColor = 1:length(opt.color) - if ischar(opt.color{iColor}) - opt.color{iColor} = colorCode2rgb(opt.color{iColor}); - end -end - -% expand - if only single colormap specified, we expand only once -if ~opt.variableWidth - missingColormaps = find(cellfun(@isempty,opt.colormap)); - for iMissing = missingColormaps(:)' - - endColor = opt.color{max(iMissing,length(opt.color))}; - % normally, we go from white to color - cmap = zeros(128,3); - for rgb = 1:3 - cmap(:,rgb) = linspace(1,endColor(rgb),128); - end - opt.colormap{iMissing} = cmap; - - end -end - -% if we have colormaps, we need to create a master which we add to the -% figure. Invert if necessary, and expand the cell array to nData -colormapLength = cellfun(@(x)size(x,1),opt.colormap); -if any(colormapLength>0) - - colormap = cat(1,opt.colormap{:}); - if opt.invert - colormap = 1-colormap; - end - set(fh,'Colormap',colormap) - if length(opt.colormap) == 1 - opt.colormap = repmat(opt.colormap,nData,1); - colormapLength = repmat(colormapLength,nData,1); - colormapOffset = zeros(nData,1); - singleMap = true; - else - colormapOffset = [0;cumsum(colormapLength(1:end-1))]; - singleMap = false; - end - -else - - colormapLength = zeros(nData,1); - if length(opt.color) == 1 - opt.color = repmat(opt.color,nData,1); - end - if opt.invert - opt.color = cellfun(@(x)1-x,opt.color,'uniformOutput',false); - end -end - - -% set hold on -holdState = get(ah,'NextPlot'); -set(ah,'NextPlot','add'); - -% if new axes: invert -if newAx && opt.invert - set(ah,'Color','k') -end - -%=================================== - - - -%=================================== -%% PLOT DISTRIBUTIONS -%=================================== - -% assign output -hh = NaN(nData,1); -[m,md,sem,sd] = deal(nan(nData,1)); -if opt.showMM == 6 - md = nan(nData,3,3); % md/q1/q3, third dim is y/xmin/xmax -end - -% get base x-array -% widthDiv is a 1-by-2 array with -% #ofDivs, whichDiv -% The full width (distWidth) is split into -% #ofDivs; whichDiv says which "stripe" is active -xWidth = opt.distWidth/opt.widthDiv(1); -xMin = -opt.distWidth/2; -xLow = xMin + xWidth * (opt.widthDiv(2)-1); -xBase = [-xWidth;xWidth;xWidth;-xWidth]/2; -xOffset = xLow + xWidth/2; - -% b/c of global norm: loop twice -plotData = cell(nData,2); - -% loop through data. Prepare patch input, then draw patch into gca -for iData = 1:nData - currentData = data{iData}; - % only plot if there is some finite data - if ~isempty(currentData(:)) && any(isfinite(currentData(:))) - - switch floor(opt.histOpt) - case 0 - % use hist - [xHist,yHist] = hist(currentData,opt.divFactor); - - case 1 - % use ksdensity - - if opt.histOpt == 1.1 - % use histogram to estimate kernel - [dummy,x] = histogram(currentData); %#ok - if length(x) == 1 - % only one value. Make fixed distribution - dx = 0.1; - yHist = x; - xHist = sum(isfinite(currentData)); - else - dx = x(2) - x(1); - - % make sure we sample frequently enough - x = min(x)-dx:dx/3:max(x)+dx; - [xHist,yHist] = ksdensity(currentData,x,'kernel','normal','width',dx/(1.5*opt.divFactor)); - end - else - - % x,y are switched relative to normal histogram - [xHist,yHist,u] = ksdensity(currentData,'kernel','normal'); - % take smaller kernel to avoid over-smoothing - if opt.divFactor ~= 1 - [xHist,yHist] = ksdensity(currentData,'kernel','normal','width',u/opt.divFactor); - end - end - - % modify histogram such that the sum of bins (not the - % integral under the curve!) 
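When variableWidth is false, the density of each distribution is encoded by color alone, using a ramp that runs from white to the chosen color (and is flipped when 'invert' is set). A sketch of the ramp construction:

% Sketch: white-to-color ramp used by distributionPlot for color-coded densities.
endColor = [0 0 1];                  % target color, e.g. blue
cmap = zeros(128, 3);
for rgb = 1:3
    cmap(:, rgb) = linspace(1, endColor(rgb), 128);
end
% With 'invert', the ramp is inverted (cmap = 1 - cmap), so it runs from black.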
equals the total number of - % observations, in order to be comparable to hist - xHist = xHist/sum(xHist)*sum(isfinite(currentData)); - - case 2 - % use histogram - bar heights are counts as in hist - [xHist,yHist] = histogram(currentData,opt.divFactor,0); - case 99 - % bins,counts already supplied - xHist = currentData(:,2)'; - yHist = currentData(:,1)'; - end - plotData{iData,1} = xHist; - plotData{iData,2} = yHist; - end -end - -goodData = find(~cellfun(@isempty,plotData(:,1))); -% get norm -switch opt.globalNorm - case 3 - % #3 normalizes relative densities - xNorm(goodData) = cellfun(@(x)min(diff(x)),plotData(goodData,2)); - xNorm(goodData) = xNorm(goodData) .* cellfun(@sum,plotData(goodData,1))'; - maxNorm(goodData) = cellfun(@max,plotData(goodData,1)); - xNorm(goodData) = xNorm(goodData)*max(maxNorm(goodData)./xNorm(goodData)); - - case 2 - % #2 should normalize so that the integral of the - % different histograms (i.e. area covered) scale with the - % respective sum of counts across all bins. Requires evenly spaced - % histograms at the moment - xNorm(goodData) = cellfun(@(x)min(diff(x)),plotData(goodData,2)); - maxNorm(goodData) = cellfun(@max,plotData(goodData,1)); - xNorm(goodData) = xNorm(goodData)*max(maxNorm(goodData)./xNorm(goodData)); - case 1 - xNorm(goodData) = max(cat(2,plotData{:,1})); - case 0 - xNorm(goodData) = cellfun(@max,plotData(goodData,1)); -end - - -for iData = goodData' - - % find current data again - currentData = data{iData}; - - xHist = plotData{iData,1}; - yHist = plotData{iData,2}; - - % find y-step - dy = min(diff(yHist)); - if isempty(dy) - dy = 0.1; - end - - % create x,y arrays - nPoints = length(xHist); - xArray = repmat(xBase,1,nPoints); - yArray = repmat([-0.5;-0.5;0.5;0.5],1,nPoints); - - - % x is iData +/- almost 0.5, multiplied with the height of the - % histogram - if opt.variableWidth - - - tmp = xArray.*repmat(xHist,4,1)./xNorm(iData); - - switch opt.histOri - case 'center' - % we can simply use xArray - xArray = tmp; - case 'right' - % shift everything to the left - delta = tmp(1,:) - xArray(1,:); - xArray = bsxfun(@minus,tmp,delta); - case 'left' - % shift everything to the right - delta = tmp(1,:) - xArray(1,:); - xArray = bsxfun(@plus,tmp,delta); - end - - xArray = xArray + opt.xValues(iData); - - else - xArray = xArray + iData; - end - - % add offset (in case we have multiple widthDiv) - xArray = xArray + xOffset; - - - % yData is simply the bin locations - yArray = repmat(yHist,4,1) + dy*yArray; - - % add patch - vertices = [xArray(:),yArray(:)]; - faces = reshape(1:numel(yArray),4,[])'; - - if colormapLength(iData) == 0 - colorOpt = {'FaceColor',opt.color{iData}}; - else - % calculate index into colormap - if singleMap - % use scaled mapping so that colorbar is meaningful - if opt.globalNorm > 0 - colorOpt = {'FaceVertexCData',xHist','CDataMapping','scaled','FaceColor','flat'}; - else - colorOpt = {'FaceVertexCData',xHist'/xNorm(iData),'CDataMapping','scaled','FaceColor','flat'}; - end - - else - idx = round((xHist/xNorm(iData))*(colormapLength(iData)-1))+1; - colorOpt = {'FaceVertexCData',idx'+colormapOffset(iData),'CDataMapping','direct','FaceColor','flat'}; - end - end - - - switch opt.xyOri - case 'normal' - hh(iData)= patch('Vertices',vertices,'Faces',faces,'Parent',ah,colorOpt{:},'EdgeColor','none'); - case 'flipped' - hh(iData)= patch('Vertices',vertices(:,[2,1]),'Faces',faces,'Parent',ah,colorOpt{:},'EdgeColor','none'); - end - - if opt.showMM > 0 - if isHistogram - [m(iData),sem(iData)] = 
weightedStats(currentData(:,1),currentData(:,2),'w'); - sd(iData) = sem(iData) * sqrt(sum(currentData(:,2))); - % weighted median: where we're at middle weight - % may need some tweaking - goodCurrentData = sortrows(currentData(all(isfinite(currentData),2),:),1); - weightList = cumsum(goodCurrentData(:,2)); - weightList = weightList / weightList(end); - md(iData) = goodCurrentData(find(weightList>0.5,1,'first'),1); - else - m(iData) = nanmean(currentData); - md(iData) = nanmedian(currentData); - sd(iData) = nanstd(currentData); - sem(iData) = sd(iData)/sqrt(sum(isfinite(currentData))); - end - - if opt.showMM == 6 - % read quantiles - "y"-value, plus x-start-stop - % re-use md array which allows using a loop below instead of - % lots of copy-paste - % md array is md/q1/q3, with third dimension y/xmin/xmax - - md(iData,2,1) = prctile(currentData,25); - md(iData,3,1) = prctile(currentData,75); - - for qq = 1:3 - % find corresponding y-bin - yLoc = repmat(... - any(yArray>md(iData,qq,1),1) & any(yArray<=md(iData,qq,1),1),... - [4 1]); - % look up corresponding x-values. Note that there is a bit - % of a risk that the line will be exactly between two very - % different bins - but if we make the line longer, it will - % be ugly almost all the time - md(iData,qq,2) = min( xArray( yLoc ) ); - md(iData,qq,3) = max( xArray( yLoc ) ); - end - - end - end -end % loop - -sh = []; -if opt.addSpread - if isHistogram - disp('Option addSpread is unavailable if data is supplied as histograms. Call plotSpread separately') - else - % add spread - try - sh = plotSpread(ah,data,'xValues',opt.xValues,'xyOri',opt.xyOri); - set(sh{1},'color',[0,128,255]/255); - catch me - if strcmp(me.identifier,'MATLAB:UndefinedFunction') - error('plotSpread not found. Please download it from the Matlab File Exchange') - else - rethrow(me) - end - end - end -end - -mh = [];mdh=[]; -if opt.showMM - % plot mean, median. 
Mean is filled red circle, median is green square - % I don't know of a very clever way to flip xy and keep everything - % readable, thus it'll be copy-paste - switch opt.xyOri - case 'normal' - if any(opt.showMM==[1,2]) - mh = plot(ah,opt.xValues+xOffset,m,'+r','Color','r','MarkerSize',12); - end - if any(opt.showMM==[1,3]) - mdh = plot(ah,opt.xValues+xOffset,md,'sg','MarkerSize',12); - end - if opt.showMM == 4 - mh = plot(ah,opt.xValues+xOffset,m,'+r','Color','r','MarkerSize',12); - mdh = myErrorbar(ah,opt.xValues+xOffset,m,sem); - end - if opt.showMM == 5 - mh = plot(ah,opt.xValues+xOffset,m,'+r','Color','r','MarkerSize',12); - mdh = myErrorbar(ah,opt.xValues+xOffset,m,sd); - end - if opt.showMM == 6 - mdh(1,:) = plot(ah,squeeze(md(:,1,2:3))',repmat(md(:,1,1)',2,1),'color','r','lineWidth',2);%,'lineStyle','--'); - mdh(2,:) = plot(ah,squeeze(md(:,2,2:3))',repmat(md(:,2,1)',2,1),'color','r','lineWidth',1);%,'lineStyle','--'); - mdh(3,:) = plot(ah,squeeze(md(:,3,2:3))',repmat(md(:,3,1)',2,1),'color','r','lineWidth',1);%,'lineStyle','--'); - end - case 'flipped' - if any(opt.showMM==[1,2]) - mh = plot(ah,m,opt.xValues+xOffset,'+r','Color','r','MarkerSize',12); - end - if any(opt.showMM==[1,3]) - mdh = plot(ah,md,opt.xValues+xOffset,'sg','MarkerSize',12); - end - if opt.showMM == 4 - mh = plot(ah,m,opt.xValues+xOffset,'+r','Color','r','MarkerSize',12); - mdh = myErrorbar(ah,m,opt.xValues+xOffset,[sem,NaN(size(sem))]); - end - if opt.showMM == 5 - mh = plot(ah,m,opt.xValues+xOffset,'+r','Color','r','MarkerSize',12); - mdh = myErrorbar(ah,m,opt.xValues+xOffset,[sd,NaN(size(sd))]); - end - if opt.showMM == 6 - mdh(1,:) = plot(ah,repmat(md(:,1,1)',2,1),squeeze(md(:,1,2:3))','color','r','lineWidth',2);%,'lineStyle','--'); - mdh(2,:) = plot(ah,repmat(md(:,2,1)',2,1),squeeze(md(:,2,2:3))','color','r','lineWidth',1);%,'lineStyle','--'); - mdh(3,:) = plot(ah,repmat(md(:,3,1)',2,1),squeeze(md(:,3,2:3))','color','r','lineWidth',1);%,'lineStyle','--'); - end - end -end - -% find extents of x-axis (or y-axis, if flipped) -minX = min(opt.xValues)-stdWidth; -maxX = max(opt.xValues)+stdWidth; - -if ~isnan(xAxLim(1)) - % we have previous limits - switch opt.xyOri - case 'normal' - minX = min(minX,xAxLim(1)); - maxX = max(maxX,xAxLim(2)); - case 'flipped' - minX = min(minX,yAxLim(1)); - maxX = max(maxX,yAxLim(2)); - end -end - - -% if ~empty, use xNames -switch opt.xyOri - case 'normal' - switch opt.xMode - case 'manual' - if newAx == false - warning('DISTRIBUTIONPLOT:ERASINGLABELS','Plotting into an existing axes and specifying labels will erase previous labels') - end - set(ah,'XTick',opt.xValues); - if ~isempty(opt.xNames) - set(ah,'XTickLabel',opt.xNames) - end - case 'auto' - % no need to do anything - end - if ~isempty(opt.yLabel) - ylabel(ah,opt.yLabel); - end - % have plot start/end properly - xlim(ah,[minX,maxX]) - case 'flipped' - switch opt.xMode - case 'manual' - if newAx == false - warning('DISTRIBUTIONPLOT:ERASINGLABELS','Plotting into an existing axes and specifying labels will erase previous labels') - end - set(ah,'YTick',opt.xValues); - if ~isempty(opt.xNames) - set(ah,'YTickLabel',opt.xNames) - end - case 'auto' - % no need to do anything - end - if ~isempty(opt.yLabel) - xlabel(ah,opt.yLabel); - end - % have plot start/end properly - ylim(ah,[minX,maxX]) -end - - -%========================== - - -%========================== -%% CLEANUP & ASSIGN OUTPUT -%========================== - -if nargout > 0 - handles{1} = hh; - handles{2} = [mh;mdh]; - handles{3} = ah; - handles{4} = sh; -end - 
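The histogram-input branch above computes a weighted median by sorting the bin centers, accumulating the normalized bin weights, and taking the first value past half of the total weight. The same logic as a small standalone helper (hypothetical name weightedMedian; values and weights assumed to be column vectors of equal length):

function wmed = weightedMedian(values,weights)
% Weighted median: sort by value, accumulate normalized weights, and
% return the first value whose cumulative weight exceeds 0.5.
    keep   = isfinite(values) & isfinite(weights) & weights > 0;
    sorted = sortrows([values(keep),weights(keep)],1);
    cumW   = cumsum(sorted(:,2));
    cumW   = cumW/cumW(end);
    wmed   = sorted(find(cumW > 0.5,1,'first'),1);
end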
-set(ah,'NextPlot',holdState); \ No newline at end of file diff --git a/Plotting/plot_slice.m b/Plotting/plot_slice.m deleted file mode 100755 index cb4f2c8..0000000 --- a/Plotting/plot_slice.m +++ /dev/null @@ -1,88 +0,0 @@ -%% This function plots the CAPs obtained for a given case -% Inputs: -% - Cp: a matrix of size n_clusters x n_voxels with the CAPs (or patterns) -% to plot -% - T: the threshold below which voxels will not be colored -% - maxC: the maximal value at which the color display will saturate -% - mask: a very long vector of logicals symbolizing the regions of the -% brain that are actually to be considered (in-brain voxels typically) -% - brain_final: a 3D matrix used to plot the greyscale brain template on which -% to overlay activity patterns -% - ai: the nii data related to the considered seed, including information -% on the scale differences between Cp indexes ('distance between voxels of -% the matrix') and actual MNI space -% - Dimension: the type of slice to plot ('X', 'Y' or 'Z') -% - MNI: the MNI coordinate of the slice to plot -% - Handle: the handle of the graph to update -function [Handle] = plot_slice(Cp,T,maxC,mask,brain_final,ai,Dimension,MNI,Handle) - - % Computes the matrix index matching the MNI coordinates of interest. - Map = inv(ai.mat); - - switch Dimension - case 'X' - ctr = round(Map(1,1)*MNI+Map(1,4)); - case 'Y' - ctr = round(Map(2,2)*MNI+Map(2,4)); - case 'Z' - ctr = round(Map(3,3)*MNI+Map(3,4)); - end - - % temp contains the volume values (within mask), and is a 3D volume after - % those lines - temp = nan(size(mask)); - temp(mask) = Cp; - temp(isnan(temp)) = 0; - temp = reshape(temp,ai.dim); - - % ho contains the structural underlay slice to plot at right slice, - % while tmpp contains the values to plot on top - switch Dimension - case {'X'} - tmpp = squeeze(temp(ctr,:,:)); - ho = squeeze(brain_final(ctr,:,:)); - case {'Y'} - tmpp = squeeze(temp(:,ctr,:)); - ho = squeeze(brain_final(:,ctr,:)); - case {'Z'} - tmpp = squeeze(temp(:,:,ctr)); - ho = squeeze(brain_final(:,:,ctr)); - end - - % I is an image with values from 0 to 1 (because the original - % image had no negative value) - I = double(ho)'/max(double(ho(:))); - I(I==0) = 1; - - % Creates an 'image with three values per pixel' - Irgb = cat(3,I,I,I); - - % Actual plotting - imagesc(Irgb,'Parent',Handle); - hold(Handle,'on'); - - % Plots the slice of interest on top of the brain template - h=imagesc(tmpp','Parent',Handle); - set(Handle,'YDir','normal'); - - % At this stage, ddd contains a set of colors, with white if the values - % are too low. We ask the axes of interest to be using this colormap - % colormap(Handle,ddd); - tmp_cm = cbrewer('div','RdBu',1000); - colormap(Handle,flipud(tmp_cm)); - - % Defines that the topmost and bottommost elements of the - % colormap will map maxC and -maxC respectively - caxis(Handle,[-1 1]*maxC); - - % Opacity: sets data points below the value of interest as - % transparent (they are white, but transparent); note that we do this - % specifically for the h imagesc (the one to plot on top) - A = ones(size(tmpp)); - A(abs(tmpp)