Commit a602d86c authored by Niels Kloosterman

fixed point averaging coarse graining + minor changes

parent 189fd2f2
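For context, point-averaging coarse graining reduces each chan-by-time trial at scale sc by averaging consecutive blocks of sc samples. A minimal standalone sketch of that scheme (illustrative only, not the committed code; x, sc, num_cg and x_cg are made-up names):

x = randn(64, 1000); % example chan-by-time matrix
sc = 3; % time scale
num_cg = floor(size(x,2)/sc); % number of coarse-grained time points
x_cg = zeros(size(x,1), num_cg); % preallocate
for t = 1:num_cg
x_cg(:,t) = mean(x(:, (t-1)*sc + (1:sc)), 2); % average each block of sc samples
end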
@@ -91,8 +91,8 @@ ft_preamble trackconfig % this converts the cfg structure in a config obje
% the ft_abort variable is set to true or false in ft_preamble_init
if ft_abort
% do not continue function execution in case the outputfile is present and the user indicated to keep it
return
end
% ensure that the input data is valid for this function, this will also do
@@ -123,15 +123,19 @@ r = ft_getopt(cfg, 'r', 0.5); % similarity criterion, 0.5
polyremoval = ft_getopt(cfg, 'polyremoval', 0);
recompute_r = ft_getopt(cfg, 'recompute_r', 'perscale_toi_sp'); % recompute r for each scale (1)
coarsegrainmethod = ft_getopt(cfg, 'coarsegrainmethod', 'filtskip'); % 'filtskip' (filter + point skipping) or 'pointavg' (point averaging)
filtmethod = ft_getopt(cfg, 'filtmethod', 'lp');
mem_available = ft_getopt(cfg, 'mem_available', 8e9); % 8 GB
allowgpu = ft_getopt(cfg, 'allowgpu', 1); % use the GPU if one is available
if strcmp(cfg.coarsegrainmethod, 'pointavg')
filtmethod = 'no'; % no filtering for point averaging
end
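% note (added for clarity): block averaging in 'pointavg' already acts as a low-pass
% (a moving average attenuates high frequencies), so the per-scale Butterworth filtering
% below is only needed for the 'filtskip' coarse-graining method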
gpuavailable = gpuDeviceCount;
if allowgpu && gpuavailable
fprintf('GPU device found. Running things there\n')
gpu = gpuDevice;
mem_available = gpu.AvailableMemory * 0.6; % only use 60% of available memory, other variables also need to fit there
end
% select channels and trials of interest, by default this will select all channels and trials
@@ -170,290 +174,289 @@ sampen = nan(nchan, nscales, ntoi);
r_estimate = nan(nchan, nscales, ntoi, nscales); % dimord chan nsc ntoi nstartingpts
for s = 1:numel(timescales) % loop through timescales
sc = timescales(s);
% apply filtering here
switch filtmethod
case 'lp'
if sc == 1
data_filt = data;
else
fs = data.fsample;
nyquist = (fs/2);
fcLowPass = (1/sc)*nyquist;
if fcLowPass == nyquist
fcLowPass = fcLowPass-1;
end
[B,A] = butter(6,fcLowPass/nyquist);
cfg.freq(1,s) = fcLowPass;
padlength = ceil(size(data.trial{1},2)./2); % use half the length of trial 1 as padding (JQK)
x_pad = cellfun(@(a) ft_preproc_padding(a, 'mean', padlength), data.trial, 'UniformOutput', false ); % add padding
x_pad = cellfun(@transpose, x_pad, 'UniformOutput', false); % transpose for filtfilt: time x chan
resamp_x_pad = cellfun(@(x_pad) filtfilt(B,A,x_pad), x_pad, 'UniformOutput', false ); % filter data
resamp_x_pad = cellfun(@transpose, resamp_x_pad, 'UniformOutput', false); % transpose back : chan x time again
resamp_x = cellfun(@(resamp_x_pad) ft_preproc_padding(resamp_x_pad, 'remove', padlength), resamp_x_pad, 'UniformOutput', false ); % remove padding
% create data_filt structure
data_filt = data;
data_filt.trial = resamp_x;
clear resamp_* x_pad;
end
case 'bp'
fs = data.fsample;
nyquist = fs/2;
fcLowPass = (1/sc)*nyquist;
if fcLowPass == nyquist
fcLowPass = fcLowPass-1;
end
if s == numel(timescales)
fcHighPass = 0.5;
else
fcHighPass = (1/(timescales(s+1)))*nyquist;
end
[B,A] = butter(6,fcLowPass/nyquist); % define low-pass filter: https://de.mathworks.com/help/signal/ref/butter.html
[D,C] = butter(6,fcHighPass/nyquist, 'high'); % define high-pass filter
cfg.freq(1,s) = fcLowPass;
cfg.freq(2,s) = fcHighPass;
padlength = ceil(size(data.trial{1},2)./2); % use half the length of trial 1 as padding (JQK)
x_pad = cellfun(@(a) ft_preproc_padding(a, 'mean', padlength), data.trial, 'UniformOutput', false ); % add padding
x_pad = cellfun(@transpose, x_pad, 'UniformOutput', false); % transpose for filtfilt: time x chan
if sc == 1 % only HPF
resamp_x_pad = cellfun(@(x_pad) filtfilt(D,C,x_pad), x_pad, 'UniformOutput', false ); % high-pass filter data
else
resamp_x_pad = cellfun(@(x_pad) filtfilt(B,A,x_pad), x_pad, 'UniformOutput', false ); % low-pass filter data
resamp_x_pad = cellfun(@(resamp_x_pad) filtfilt(D,C,resamp_x_pad), resamp_x_pad, 'UniformOutput', false ); % high-pass filter data
end
resamp_x_pad = cellfun(@transpose, resamp_x_pad, 'UniformOutput', false); % transpose back : chan x time again
resamp_x = cellfun(@(resamp_x_pad) ft_preproc_padding(resamp_x_pad, 'remove', padlength), ... % remove padding
resamp_x_pad, 'UniformOutput', false );
%figure; hold on; plot(resamp_x{1}(1,:)); plot(data.trial{1}(1,:))
% create data_filt structure
data_filt = data;
data_filt.trial = resamp_x;
clear resamp_* x_pad;
case 'hp'
fs = data.fsample;
nyquist = (fs/2);
fcHighPass = (1/(sc+1))*nyquist;
[D,C] = butter(6,fcHighPass/nyquist, 'high'); % define high-pass filter
cfg.freq(1,s) = fcHighPass;
padlength = ceil(size(data.trial{1},2)./2); % use half the length of trial 1 as padding (JQK)
x_pad = cellfun(@(a) ft_preproc_padding(a, 'mean', padlength), data.trial, 'UniformOutput', false ); % add padding
x_pad = cellfun(@transpose, x_pad, 'UniformOutput', false); % transpose for filtfilt: time x chan
resamp_x_pad = cellfun(@(x_pad) filtfilt(D,C,x_pad), x_pad, 'UniformOutput', false ); % high-pass filter data
resamp_x_pad = cellfun(@transpose, resamp_x_pad, 'UniformOutput', false); % transpose back : chan x time again
resamp_x = cellfun(@(resamp_x_pad) ft_preproc_padding(resamp_x_pad, 'remove', padlength), ... % remove padding
resamp_x_pad, 'UniformOutput', false );
%figure; hold on; plot(resamp_x{1}(1,:)); plot(data_sel.trial{1}(1,:))
% create data_filt structure
data_filt = data;
data_filt.trial = resamp_x;
clear resamp_* x_pad;
case 'no'
data_filt = data;
end
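% worked example (added for clarity; assumes fs = 250 Hz): nyquist = 125 Hz, so scale
% sc = 5 gives fcLowPass = (1/5)*125 = 25 Hz, i.e. butter(6, 25/125) = butter(6, 0.2);
% at sc = 1 the 'lp' branch keeps the data unfiltered since the cutoff would equal nyquist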
if strcmp(recompute_r, 'per_scale') % recompute r for each scale or: sc toi sp
% per_scale
% per_toi
% pertoi_sp (fixed per scale)
% perscale_toi_sp (run til now)
% perscale_toi
r_new = r * std(cell2mat(data_filt.trial),1,2);
end

for itoi = 1:ntoi
fprintf('Scale %d of %d; Time %d of %d\n', s, length(timescales),itoi, ntoi)

% select time window of interest from each trial
tmpcfg=[];
tmpcfg.toilim = [toi(itoi)-timwin*0.5 toi(itoi)+timwin*0.5];
tmpcfg.showcallinfo = 'no';
data_sel = ft_redefinetrial(tmpcfg, data_filt);

% only take trials that have the whole interval
tmpcfg = [];
tmpcfg.minlength = timwin;
tmpcfg.showcallinfo = 'no';
data_sel = ft_redefinetrial(tmpcfg, data_sel);

% need 40 samples for mse calc, 3 smp per trial for scale 42: 40/3 = 13.3 trials, make 15
ntrials = size(data_sel.trial,2);
if ntrials < 1
warning('Time point %g: Not enough trials remain', toi(itoi))
break % subsequent time points will also not work
end

if strcmp(recompute_r, 'per_toi') % not per scale
% select time window of interest from each trial
tmpcfg=[];
tmpcfg.toilim = [toi(itoi)-timwin*0.5 toi(itoi)+timwin*0.5];
data_sel_unfilt = ft_redefinetrial(tmpcfg, data);
% only take trials that have the whole interval
tmpcfg = [];
tmpcfg.minlength = timwin;
data_sel_unfilt = ft_redefinetrial(tmpcfg, data_sel_unfilt);
% need 40 samples for mse calc, 3 smp per trial for scale 42: 40/3 = 13.3 trials, make 15
ntrials = size(data_sel_unfilt.trial,2);
if ntrials < 1
warning('Time point %g: Not enough trials remain', toi(itoi))
break % subsequent time points will also not work
end
% calculate similarity criterion
r_new = r * std(cell2mat(data_sel_unfilt.trial),1,2);
nchan = size(data_sel_unfilt.trial{1},1);
elseif strcmp(recompute_r, 'perscale_toi')
% calculate similarity criterion
r_new = r * std(cell2mat(data_sel.trial),1,2);
nchan = size(data_sel.trial{1},1);
end

% do point skipping
cg_data = {};
switch coarsegrainmethod
case 'filtskip'
nloops = sc;
cg_data = cell(nloops,1); % make cell: cg_data{istart}{trials}(chan-by-time)
resamp_x = data_sel.trial;
for is = 1:nloops % loop over starting points here!
cg_data{is} = cellfun(@(resamp_x) resamp_x(:, is:(sc-1+1):end), resamp_x, 'UniformOutput', false ); % keep every sc-th sample, starting at sample is
end
clear resamp_x;
case 'pointavg' % original point averaging coarse graining, no loop over starting points
nloops = 1; % no starting points loop for point avg
if sc == 1 % no coarse graining for native sampling rate
cg_data{1} = data_sel.trial; %only keep trial data
else % coarse-grain time series at this time scale
nchan = size(data_sel.trial{1},1);
for itrial = 1:length(data_sel.trial)
num_cg_tpts = floor(length(data_sel.trial{itrial})/sc); % number of coarse-grained time points
cg_data{1}{itrial} = zeros(nchan, num_cg_tpts); % preallocate cg_data matrix
for t = 1:num_cg_tpts % calculate coarse_grained time series
cg_data{1}{itrial}(:,t) = mean( data_sel.trial{itrial}(:, (t-1)*sc + [1:sc]) ,2);
end
end
end
end
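% illustration (added): for sc = 3 and a trial [1 2 3 4 5 6], 'filtskip' keeps every
% sc-th sample for each of the nloops = sc starting points, i.e. [1 4], [2 5] and [3 6],
% and the pattern counts are summed over starting points further down (allcont = sum(allcont,1));
% 'pointavg' instead averages each block of sc samples, yielding the single series [2 5]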
% after coarsegraining, loop mse computation across starting points
allcont = zeros(sc, nchan, m+1); % start_chan_m
for istart = 1:nloops
if max(cellfun(@(x) size(x,2), cg_data{istart})) == m % TODO check this at start
fprintf('Coarse grained trials below %d + 1 samples, skipping remaining starting points\n', m)
break
end
% concatenate trials and convert to single
y = single(cell2mat(cg_data{istart}));
% collect trial bounds and create mask with valid time points for pats
trl_bounds = cumsum(cellfun(@(x) size(x,2), cg_data{istart}))';
trl_mask = true(size(y,2),1);
if allowgpu && gpuavailable
trl_mask = gpuArray(trl_mask);
end
trl_mask([trl_bounds-1; trl_bounds]) = false;
% break if n data points < 100 (See Grandy et al., 2016)
ndatapoints = length(trl_mask); % TODO check this at start
if ndatapoints < 100
fprintf('N data points < 100, breaking\n')
break
end
% Calculate sample entropy of coarse grained time series
if strcmp(recompute_r, 'perscale_toi_sp')
r_new = r * std(y,1,2);
end
% keep the estimated r parameter
r_estimate(:, s, itoi, istart) = r_new; % dimord chan nsc ntoi nstartingpts
% chunk y to keep memory usage in check
max_numel = mem_available/4; % single = 4 bytes
chunk_size = round(max_numel/numel(y));
n_chunks = ceil(size(y,2)/chunk_size);
temp = 1;
chunk_borders = zeros(n_chunks,2);
for ic = 1:n_chunks
chunk_borders(ic,:) = [temp temp+chunk_size];
temp = temp+chunk_size-1; % chunks need to overlap to avoid missing pats at chunk borders
end
chunk_borders(end) = size(y,2);
clear temp
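% worked example (added): with chunk_size = 4 and size(y,2) = 10 this gives n_chunks = 3
% and chunk_borders = [1 5; 4 8; 7 10]; consecutive chunks overlap so that patterns
% starting near a chunk border are not missed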
%fprintf('starting point %d\n', istart)
cont = zeros(m+1, n_chunks, nchan);
y_chunk1 = shiftdim(y', -1 ); % insert singleton dim
r_new2 = shiftdim(r_new, -2);
if allowgpu && gpuavailable
cont = gpuArray(cont);
y_chunk1 = gpuArray(y_chunk1);
r_new2 = gpuArray(r_new2);
end
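% note (added for clarity): in the chunk loop below ymat marks, per channel, sample pairs
% that lie within the tolerance r; AND-ing ymat with a diagonally shifted copy (circshift)
% extends a match by one sample, so the counts taken at k = m and k = m+1 are the m- and
% (m+1)-length template counts that enter sampen = -log(count(m+1)/count(m))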
fprintf('%d chunks: ', n_chunks)
for ic = 1:n_chunks
fprintf('%d ', ic)
y_inds = transpose(chunk_borders(ic,1):chunk_borders(ic,2));
y_chunk2 = permute(y_chunk1(1,y_inds,:), [2 1 3]); % insert singleton dim
if allowgpu && gpuavailable
y_chunk2 = gpuArray(y_chunk2);
end
ymat = bsxfun(@le, abs(bsxfun(@minus, y_chunk1, y_chunk2 )), r_new2 );
for ichan=1:nchan % loop since triu only supports 2D
ymat(:,:,ichan) = triu(ymat(:,:,ichan), chunk_borders(ic,1));
end
for k = 1:m+1
if k >= m % TODO try for m > 2
cont(k,ic,:) = sum(reshape(ymat(trl_mask(y_inds(1:end-2)), trl_mask, :), [], nchan));
end
if k < m+1
ymat = ymat & circshift(ymat, [-1 -1 0]);
end
end
clear ymat y_inds y_chunk2
end
allcont(istart, :, :) = gather(squeeze(sum(cont,2))'); % sum over chunks. dimord: start_chan_m
%fprintf('\n')
end % cg starting points
allcont = sum(allcont,1); % sum counts over starting points
if ndatapoints < 100
fprintf('N data points < 100, breaking\n')
break
end
% calculate sample entropy
for ichan=1:nchan
if allcont(1,ichan,m+1) == 0 || allcont(1,ichan,m) == 0
fprintf('zero patterns found!\n')
% nlin_sc = size(pnts,1); % ori THG code
% mse(s) = -log(1/((nlin_sc)*(nlin_sc -1)));
npossiblepats = length(find(trl_mask));
sampen(ichan,s,itoi) = -log(