[REF] Use nilearn and images instead of arrays #35

Draft · wants to merge 3 commits into main
57 changes: 19 additions & 38 deletions mapca/mapca.py
@@ -2,8 +2,8 @@
 """
 import logging
 
-import nibabel as nib
 import numpy as np
+from nilearn import image, masking
 from nilearn._utils import check_niimg_3d, check_niimg_4d
 from scipy.stats import kurtosis
 from sklearn.decomposition import PCA
@@ -102,29 +102,28 @@ def _fit(self, img, mask):
 
         img = check_niimg_4d(img)
         mask = check_niimg_3d(mask)
-        data = img.get_fdata()
-        mask = mask.get_fdata()
+        data = masking.apply_mask(img, mask)  # T x S
+        X = data.T  # S x T
 
-        [n_x, n_y, n_z, n_timepoints] = data.shape
-        data_nib_V = np.reshape(data, (n_x * n_y * n_z, n_timepoints), order="F")
-        mask_vec = np.reshape(mask, n_x * n_y * n_z, order="F")
-        X = data_nib_V[mask_vec == 1, :]
 
-        n_samples = np.sum(mask_vec)
+        [n_x, n_y, n_z, n_timepoints] = img.shape
+        n_samples = X.shape[0]
 
         self.scaler_ = StandardScaler(with_mean=True, with_std=True)
         if self.normalize:
             # TODO: determine if tedana is already normalizing before this
             X = self.scaler_.fit_transform(X.T).T  # This was X_sc
-            # X = ((X.T - X.T.mean(axis=0)) / X.T.std(axis=0)).T
 
+        X_img = masking.unmask(X.T, mask)
+
         LGR.info("Performing SVD on original data...")
         V, eigenvalues = utils._icatb_svd(X, n_timepoints)
         LGR.info("SVD done on original data")
 
         # Reordering of values
         eigenvalues = eigenvalues[::-1]
         dataN = np.dot(X, V[:, ::-1])
+        dataN_img = masking.unmask(dataN.T, mask)
         # Potentially the small differences come from the different signs on V
 
         # Using 12 gaussian components from middle, top and bottom gaussian
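
Note on the orientation conventions this refactor leans on: `masking.apply_mask` returns a `(n_timepoints, n_voxels)` array, hence the transposes back to the original samples-by-time layout, and `masking.unmask` rebuilds a 4D image from that same time-by-voxels layout. A minimal sketch with synthetic data (shapes and names here are illustrative, not from this PR):

```python
import nibabel as nib
import numpy as np
from nilearn import masking

rng = np.random.default_rng(0)
img = nib.Nifti1Image(rng.standard_normal((4, 4, 4, 10)), affine=np.eye(4))
mask = nib.Nifti1Image(np.ones((4, 4, 4), dtype=np.int8), affine=np.eye(4))

data = masking.apply_mask(img, mask)  # shape (10, 64): T x S
X = data.T                            # shape (64, 10): S x T

# unmask expects T x S and rebuilds a 4D image, zero outside the mask
img_back = masking.unmask(X.T, mask)
assert img_back.shape == (4, 4, 4, 10)
```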
@@ -155,13 +154,11 @@ def _fit(self, img, mask):
 
         # Estimate the subsampling depth for effectively i.i.d. samples
         LGR.info("Estimating the subsampling depth for effective i.i.d samples...")
-        mask_ND = np.reshape(mask_vec, (n_x, n_y, n_z), order="F")
         sub_depth = len(idx)
         sub_iid_sp = np.zeros((sub_depth,))
         for i in range(sub_depth):
-            x_single = np.zeros(n_x * n_y * n_z)
-            x_single[mask_vec == 1] = dataN[:, idx[i]]
-            x_single = np.reshape(x_single, (n_x, n_y, n_z), order="F")
+            x_single = image.index_img(dataN_img, idx[i])
+            x_single = x_single.get_fdata()
             sub_iid_sp[i] = utils._est_indp_sp(x_single)[0] + 1
             if i > 6:
                 tmp_sub_sp = sub_iid_sp[0:i]
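
For reference, `image.index_img` is the nilearn idiom replacing the zero-fill and reshape dance above: it extracts one 3D volume from a 4D image. A small self-contained sketch (synthetic image, not this PR's data):

```python
import nibabel as nib
import numpy as np
from nilearn import image

img_4d = nib.Nifti1Image(np.zeros((4, 4, 4, 6)), affine=np.eye(4))

vol = image.index_img(img_4d, 3)  # 3D image holding the fourth volume
assert vol.shape == (4, 4, 4)
data_3d = vol.get_fdata()         # plain ndarray, as _est_indp_sp expects
```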
@@ -177,17 +174,11 @@ def _fit(self, img, mask):
         N = np.round(n_samples / np.power(sub_iid_sp_median, dim_n))
 
         if sub_iid_sp_median != 1:
-            mask_s = utils._subsampling(mask_ND, sub_iid_sp_median)
-            mask_s_1d = np.reshape(mask_s, np.prod(mask_s.shape), order="F")
-            dat = np.zeros((int(np.sum(mask_s_1d)), n_timepoints))
             LGR.info("Generating subsampled i.i.d. data...")
-            for i_vol in range(n_timepoints):
-                x_single = np.zeros(n_x * n_y * n_z)
-                x_single[mask_vec == 1] = X[:, i_vol]
-                x_single = np.reshape(x_single, (n_x, n_y, n_z), order="F")
-                dat0 = utils._subsampling(x_single, sub_iid_sp_median)
-                dat0 = np.reshape(dat0, np.prod(dat0.shape), order="F")
-                dat[:, i_vol] = dat0[mask_s_1d == 1]
+            mask_s = utils._subsampling(mask, sub_iid_sp_median)
+            dat_s = utils._subsampling(X_img, sub_iid_sp_median)
+            dat = masking.apply_mask(dat_s, mask_s)  # T x S
+            dat = dat.T  # S x T
 
             # Perform Variance Normalization
             temp_scaler = StandardScaler(with_mean=True, with_std=True)
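
The per-volume loop collapses into two `_subsampling` calls plus one `apply_mask` because striding the mask and the data by the same step keeps them voxel-aligned: masking after striding equals striding each masked volume. A numpy-only illustration of that equivalence (all shapes arbitrary):

```python
import numpy as np

rng = np.random.default_rng(1)
data = rng.standard_normal((6, 6, 6, 5))               # x, y, z, t
mask = rng.integers(0, 2, size=(6, 6, 6)).astype(bool)
step = 2

strided_data = data[::step, ::step, ::step, :]  # subsample space only
strided_mask = mask[::step, ::step, ::step]

a = strided_data[strided_mask, :]  # mask after striding
b = np.stack(                      # stride each masked volume (the old loop)
    [data[..., t][::step, ::step, ::step][strided_mask] for t in range(5)],
    axis=-1,
)
assert np.array_equal(a, b)
```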
@@ -264,11 +255,9 @@ def _fit(self, img, mask):
         component_maps = np.dot(
             np.dot(X, self.components_.T), np.diag(1.0 / self.explained_variance_)
         )
-        component_maps_3d = np.zeros((n_x * n_y * n_z, n_components))
-        component_maps_3d[mask_vec == 1, :] = component_maps
-        component_maps_3d = np.reshape(component_maps_3d, (n_x, n_y, n_z, n_components), order="F")
         self.u_ = component_maps
-        self.u_nii_ = nib.Nifti1Image(component_maps_3d, img.affine, img.header)
+        component_imgs = masking.unmask(component_maps.T, mask)
+        self.u_nii_ = component_imgs
 
     def fit(self, img, mask):
         """Fit the model with X.
@@ -356,23 +345,15 @@ def inverse_transform(self, img, mask):
         """
         img = check_niimg_4d(img)
         mask = check_niimg_3d(mask)
-        data = img.get_fdata()
-        mask = mask.get_fdata()
 
-        [n_x, n_y, n_z, n_components] = data.shape
-        data_nib_V = np.reshape(data, (n_x * n_y * n_z, n_components), order="F")
-        mask_vec = np.reshape(mask, n_x * n_y * n_z, order="F")
-        X = data_nib_V[mask_vec == 1, :]
+        X = masking.apply_mask(img, mask)  # T x S
+        X = X.T  # S x T
 
         X_orig = np.dot(np.dot(X, np.diag(self.explained_variance_)), self.components_)
         if self.normalize:
             X_orig = self.scaler_.inverse_transform(X_orig.T).T
 
-        n_t = X_orig.shape[1]
-        out_data = np.zeros((n_x * n_y * n_z, n_t))
-        out_data[mask_vec == 1, :] = X_orig
-        out_data = np.reshape(out_data, (n_x, n_y, n_z, n_t), order="F")
-        img_orig = nib.Nifti1Image(out_data, img.affine, img.header)
+        img_orig = masking.unmask(X_orig.T, mask)
         return img_orig
 
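
With both directions going through nilearn, `inverse_transform` is just a projection followed by `unmask`, and voxels outside the mask come back as zeros. A hedged usage sketch (synthetic mask and data, not this PR's):

```python
import nibabel as nib
import numpy as np
from nilearn import masking

mask_arr = np.zeros((4, 4, 4), dtype=np.int8)
mask_arr[1:3, 1:3, 1:3] = 1  # 8 voxels inside the mask
mask = nib.Nifti1Image(mask_arr, affine=np.eye(4))

X_orig = np.ones((8, 7))                   # S x T
img_orig = masking.unmask(X_orig.T, mask)  # unmask takes T x S
assert img_orig.shape == (4, 4, 4, 7)
assert img_orig.get_fdata()[0, 0, 0].sum() == 0  # outside-mask voxels are zero
```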
24 changes: 20 additions & 4 deletions mapca/utils.py
@@ -3,6 +3,7 @@
 """
 import logging
 
+import nibabel as nib
 import numpy as np
 from scipy.fftpack import fftn, fftshift
 from scipy.linalg import svd
@@ -179,7 +180,7 @@ def _est_indp_sp(data):
     return n_iters, ent_rate
 
 
-def _subsampling(data, sub_depth):
+def _subsampling(img, sub_depth):
     """
     Subsampling the data evenly with space 'sub_depth'.
 
@@ -195,10 +196,25 @@ def _subsampling(img, sub_depth):
     out : ndarray
         Subsampled data
     """
 
+    if not isinstance(img, np.ndarray):
+        data = img.get_fdata()
+
+        # Prepare image
+        target_affine = img.affine.copy()
+        diag = np.diag(target_affine).copy()
+        diag[:3] *= sub_depth
+        np.fill_diagonal(target_affine, diag)
+    else:
+        data = img.copy()
     slices = [slice(None, None, sub_depth)] * data.ndim
-    out = data[tuple(slices)]
-    return out
+    subsampled_data = data[tuple(slices)]
+
+    if not isinstance(img, np.ndarray):
+        subsampled_img = nib.Nifti1Image(subsampled_data, target_affine, img.header)
+    else:
+        subsampled_img = subsampled_data
+
+    return subsampled_img
 
 
 def _kurtn(data):
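
Two notes on the affine handling above, both hedged review observations rather than tested claims. First, keeping every `sub_depth`-th voxel makes the remaining voxels `sub_depth` times larger in world space, so scaling the first three diagonal affine entries preserves alignment; this assumes a diagonal (rotation-free) affine, since off-diagonal terms are not rescaled. Second, `slices` spans `data.ndim` dimensions, so a 4D input is also strided along time, unlike the old volume-wise path; worth confirming that is intended. A minimal sketch of the affine logic:

```python
import nibabel as nib
import numpy as np

img = nib.Nifti1Image(np.zeros((8, 8, 8)), affine=np.diag([2.0, 2.0, 2.0, 1.0]))
sub_depth = 2

target_affine = img.affine.copy()
diag = np.diag(target_affine).copy()
diag[:3] *= sub_depth  # 2 mm voxels become 4 mm voxels
np.fill_diagonal(target_affine, diag)

sub = nib.Nifti1Image(
    img.get_fdata()[::sub_depth, ::sub_depth, ::sub_depth],
    target_affine,
)
assert sub.shape == (4, 4, 4)
assert np.allclose(np.diag(sub.affine)[:3], 4.0)
```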