From a1d7faf8d7d50d193b4ed0b0e6cc31e46e741090 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Mon, 22 Apr 2024 11:43:11 -0400
Subject: [PATCH 1/6] Add type hints to metric functions.

---
 tedana/metrics/dependence.py | 115 ++++++++++++++++++++++++++++++-----
 1 file changed, 99 insertions(+), 16 deletions(-)

diff --git a/tedana/metrics/dependence.py b/tedana/metrics/dependence.py
index 69693e452..8e854e1ff 100644
--- a/tedana/metrics/dependence.py
+++ b/tedana/metrics/dependence.py
@@ -1,7 +1,9 @@
 """Metrics evaluating component TE-dependence or -independence."""
 
 import logging
+import typing
 
+import nibabel as nb
 import numpy as np
 from scipy import stats
 
@@ -12,7 +14,11 @@
 RepLGR = logging.getLogger("REPORT")
 
 
-def calculate_weights(data_optcom, mixing):
+def calculate_weights(
+    *,
+    data_optcom: np.ndarray,
+    mixing: np.ndarray,
+) -> np.ndarray:
     """Calculate standardized parameter estimates between data and mixing matrix.
 
     Parameters
@@ -35,7 +41,11 @@ def calculate_weights(data_optcom, mixing):
     return weights
 
 
-def calculate_betas(data, mixing):
+def calculate_betas(
+    *,
+    data: np.ndarray,
+    mixing: np.ndarray,
+) -> np.ndarray:
     """Calculate unstandardized parameter estimates between data and mixing matrix.
 
     Parameters
@@ -66,7 +76,11 @@ def calculate_betas(data, mixing):
     return betas
 
 
-def calculate_psc(data_optcom, optcom_betas):
+def calculate_psc(
+    *,
+    data_optcom: np.ndarray,
+    optcom_betas: np.ndarray,
+) -> np.ndarray:
     """Calculate percent signal change maps for components against optimally-combined data.
 
     Parameters
@@ -87,7 +101,11 @@ def calculate_psc(data_optcom, optcom_betas):
     return psc
 
 
-def calculate_z_maps(weights, z_max=8):
+def calculate_z_maps(
+    *,
+    weights: np.ndarray,
+    z_max: float = 8,
+) -> np.ndarray:
     """Calculate component-wise z-statistic maps.
 
     This is done by z-scoring standardized parameter estimate maps and cropping extreme values.
@@ -111,7 +129,15 @@ def calculate_z_maps(weights, z_max=8):
     return z_maps
 
 
-def calculate_f_maps(data_cat, z_maps, mixing, adaptive_mask, tes, f_max=500):
+def calculate_f_maps(
+    *,
+    data_cat: np.ndarray,
+    z_maps: np.ndarray,
+    mixing: np.ndarray,
+    adaptive_mask: np.ndarray,
+    tes: np.ndarray,
+    f_max: float = 500,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
     """Calculate pseudo-F-statistic maps for TE-dependence and -independence models.
 
     Parameters
@@ -196,7 +222,14 @@ def calculate_f_maps(data_cat, z_maps, mixing, adaptive_mask, tes, f_max=500):
     return f_t2_maps, f_s0_maps, pred_t2_maps, pred_s0_maps
 
 
-def threshold_map(maps, mask, ref_img, threshold, csize=None):
+def threshold_map(
+    *,
+    maps: np.ndarray,
+    mask: np.ndarray,
+    ref_img: nb.Nifti1Image,
+    threshold: float,
+    csize: typing.Union[int, None] = None,
+) -> np.ndarray:
     """Perform cluster-extent thresholding.
 
     Parameters
@@ -234,7 +267,14 @@ def threshold_map(maps, mask, ref_img, threshold, csize=None):
     return maps_thresh
 
 
-def threshold_to_match(maps, n_sig_voxels, mask, ref_img, csize=None):
+def threshold_to_match(
+    *,
+    maps: np.ndarray,
+    n_sig_voxels: np.ndarray,
+    mask: np.ndarray,
+    ref_img: nb.Nifti1Image,
+    csize: typing.Union[int, None] = None,
+) -> np.ndarray:
     """Cluster-extent threshold a map to target number of significant voxels.
 
     Resulting maps have roughly the requested number of significant voxels, after cluster-extent
@@ -306,7 +346,12 @@ def threshold_to_match(maps, n_sig_voxels, mask, ref_img, csize=None):
     return clmaps
 
 
-def calculate_dependence_metrics(f_t2_maps, f_s0_maps, z_maps):
+def calculate_dependence_metrics(
+    *,
+    f_t2_maps: np.ndarray,
+    f_s0_maps: np.ndarray,
+    z_maps: np.ndarray,
+) -> tuple[np.ndarray, np.ndarray]:
     """Calculate Kappa and Rho metrics from F-statistic maps.
 
     Just a weighted average over voxels.
@@ -341,7 +386,10 @@ def calculate_dependence_metrics(f_t2_maps, f_s0_maps, z_maps):
     return kappas, rhos
 
 
-def calculate_varex(optcom_betas):
+def calculate_varex(
+    *,
+    optcom_betas: np.ndarray,
+) -> np.ndarray:
     """Calculate unnormalized(?) variance explained from unstandardized parameter estimate maps.
 
     Parameters
@@ -360,7 +408,10 @@ def calculate_varex(optcom_betas):
     return varex
 
 
-def calculate_varex_norm(weights):
+def calculate_varex_norm(
+    *,
+    weights: np.ndarray,
+) -> np.ndarray:
     """Calculate normalized variance explained from standardized parameter estimate maps.
 
     Parameters
@@ -378,7 +429,12 @@ def calculate_varex_norm(weights):
     return varex_norm
 
 
-def compute_dice(clmaps1, clmaps2, axis=0):
+def compute_dice(
+    *,
+    clmaps1: np.ndarray,
+    clmaps2: np.ndarray,
+    axis: typing.Union[int, None] = 0,
+) -> np.ndarray:
     """Compute the Dice similarity index between two thresholded and binarized maps.
 
     NaNs are converted automatically to zeroes.
@@ -402,7 +458,13 @@ def compute_dice(clmaps1, clmaps2, axis=0):
     return dice_values
 
 
-def compute_signal_minus_noise_z(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95):
+def compute_signal_minus_noise_z(
+    *,
+    z_maps: np.ndarray,
+    z_clmaps: np.ndarray,
+    f_t2_maps: np.ndarray,
+    z_thresh: float = 1.95,
+) -> tuple[np.ndarray, np.ndarray]:
     """Compare signal and noise z-statistic distributions with a two-sample t-test.
 
     Divide voxel-level thresholded F-statistic maps into distributions of
@@ -464,7 +526,13 @@ def compute_signal_minus_noise_z(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95):
     return signal_minus_noise_z, signal_minus_noise_p
 
 
-def compute_signal_minus_noise_t(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95):
+def compute_signal_minus_noise_t(
+    *,
+    z_maps: np.ndarray,
+    z_clmaps: np.ndarray,
+    f_t2_maps: np.ndarray,
+    z_thresh: float = 1.95,
+) -> tuple[np.ndarray, np.ndarray]:
     """Compare signal and noise t-statistic distributions with a two-sample t-test.
 
     Divide voxel-level thresholded F-statistic maps into distributions of
@@ -511,7 +579,10 @@ def compute_signal_minus_noise_t(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95):
     return signal_minus_noise_t, signal_minus_noise_p
 
 
-def compute_countsignal(stat_cl_maps):
+def compute_countsignal(
+    *,
+    stat_cl_maps: np.ndarray,
+) -> np.ndarray:
     """Count the number of significant voxels in a set of cluster-extent thresholded maps.
 
     Parameters
@@ -528,7 +599,12 @@ def compute_countsignal(stat_cl_maps):
     return countsignal
 
 
-def compute_countnoise(stat_maps, stat_cl_maps, stat_thresh=1.95):
+def compute_countnoise(
+    *,
+    stat_maps: np.ndarray,
+    stat_cl_maps: np.ndarray,
+    stat_thresh: float = 1.95,
+) -> np.ndarray:
     """Count the number of significant voxels from non-significant clusters.
 
     This is done after application of a cluster-defining threshold, but compared against results
@@ -556,7 +632,14 @@ def compute_countnoise(stat_maps, stat_cl_maps, stat_thresh=1.95):
     return countnoise
 
 
-def generate_decision_table_score(kappa, dice_ft2, signal_minus_noise_t, countnoise, countsig_ft2):
+def generate_decision_table_score(
+    *,
+    kappa: np.ndarray,
+    dice_ft2: np.ndarray,
+    signal_minus_noise_t: np.ndarray,
+    countnoise: np.ndarray,
+    countsig_ft2: np.ndarray,
+) -> np.ndarray:
     """Generate a five-metric decision table.
 
     Metrics are ranked in either descending or ascending order if they measure TE-dependence or

From fcd9c77e368764a768492d1bd81e9c8b4796b720 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Mon, 22 Apr 2024 11:52:50 -0400
Subject: [PATCH 2/6] Use keyword arguments.

---
 tedana/metrics/collect.py    | 122 +++++++++++++++++++++--------------
 tedana/metrics/dependence.py |   4 +-
 2 files changed, 76 insertions(+), 50 deletions(-)

diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py
index e71e5ee4a..c748c4c6c 100644
--- a/tedana/metrics/collect.py
+++ b/tedana/metrics/collect.py
@@ -119,7 +119,10 @@ def generate_metrics(
     metric_maps = {}
     if "map weight" in required_metrics:
         LGR.info("Calculating weight maps")
-        metric_maps["map weight"] = dependence.calculate_weights(data_optcom, mixing)
+        metric_maps["map weight"] = dependence.calculate_weights(
+            data_optcom=data_optcom,
+            mixing=mixing,
+        )
         signs = determine_signs(metric_maps["map weight"], axis=0)
         comptable["optimal sign"] = signs
         metric_maps["map weight"], mixing = flip_components(
@@ -128,31 +131,42 @@ def generate_metrics(
 
     if "map optcom betas" in required_metrics:
         LGR.info("Calculating parameter estimate maps for optimally combined data")
-        metric_maps["map optcom betas"] = dependence.calculate_betas(data_optcom, mixing)
+        metric_maps["map optcom betas"] = dependence.calculate_betas(
+            data=data_optcom,
+            mixing=mixing,
+        )
 
         if io_generator.verbose:
-            metric_maps["map echo betas"] = dependence.calculate_betas(data_cat, mixing)
+            metric_maps["map echo betas"] = dependence.calculate_betas(
+                data=data_cat,
+                mixing=mixing,
+            )
 
     if "map percent signal change" in required_metrics:
         LGR.info("Calculating percent signal change maps")
         # used in kundu v3.2 tree
         metric_maps["map percent signal change"] = dependence.calculate_psc(
-            data_optcom, metric_maps["map optcom betas"]
+            data_optcom=data_optcom,
+            optcom_betas=metric_maps["map optcom betas"],
         )
 
     if "map Z" in required_metrics:
         LGR.info("Calculating z-statistic maps")
-        metric_maps["map Z"] = dependence.calculate_z_maps(metric_maps["map weight"])
+        metric_maps["map Z"] = dependence.calculate_z_maps(weights=metric_maps["map weight"])
 
         if io_generator.verbose:
            io_generator.save_file(
                 utils.unmask(metric_maps["map Z"] ** 2, mask),
-                label + " component weights img",
+                f"{label} component weights img",
             )
 
     if ("map FT2" in required_metrics) or ("map FS0" in required_metrics):
         LGR.info("Calculating F-statistic maps")
         m_t2, m_s0, p_m_t2, p_m_s0 = dependence.calculate_f_maps(
-            data_cat, metric_maps["map Z"], mixing, adaptive_mask, tes
+            data_cat=data_cat,
+            z_maps=metric_maps["map Z"],
+            mixing=mixing,
+            adaptive_mask=adaptive_mask,
+            tes=tes,
         )
         metric_maps["map FT2"] = m_t2
         metric_maps["map FS0"] = m_s0
@@ -162,58 +176,73 @@ def generate_metrics(
         if io_generator.verbose:
             io_generator.save_file(
                 utils.unmask(metric_maps["map FT2"], mask),
-                label + " component F-T2 img",
+                f"{label} component F-T2 img",
             )
             io_generator.save_file(
                 utils.unmask(metric_maps["map FS0"], mask),
-                label + " component F-S0 img",
+                f"{label} component F-S0 img",
             )
 
     if "map Z clusterized" in required_metrics:
         LGR.info("Thresholding z-statistic maps")
         z_thresh = 1.95
         metric_maps["map Z clusterized"] = dependence.threshold_map(
-            metric_maps["map Z"], mask, ref_img, z_thresh
+            maps=metric_maps["map Z"],
+            mask=mask,
+            ref_img=ref_img,
+            z_thresh=z_thresh,
         )
 
     if "map FT2 clusterized" in required_metrics:
         LGR.info("Calculating T2* F-statistic maps")
         f_thresh, _, _ = getfbounds(len(tes))
         metric_maps["map FT2 clusterized"] = dependence.threshold_map(
-            metric_maps["map FT2"], mask, ref_img, f_thresh
+            maps=metric_maps["map FT2"],
+            mask=mask,
+            ref_img=ref_img,
+            f_thresh=f_thresh,
         )
 
     if "map FS0 clusterized" in required_metrics:
         LGR.info("Calculating S0 F-statistic maps")
         f_thresh, _, _ = getfbounds(len(tes))
         metric_maps["map FS0 clusterized"] = dependence.threshold_map(
-            metric_maps["map FS0"], mask, ref_img, f_thresh
+            maps=metric_maps["map FS0"],
+            mask=mask,
+            ref_img=ref_img,
+            f_thresh=f_thresh,
         )
 
     # Intermediate metrics
     if "countsigFT2" in required_metrics:
         LGR.info("Counting significant voxels in T2* F-statistic maps")
         comptable["countsigFT2"] = dependence.compute_countsignal(
-            metric_maps["map FT2 clusterized"]
+            stat_cl_maps=metric_maps["map FT2 clusterized"],
         )
 
     if "countsigFS0" in required_metrics:
         LGR.info("Counting significant voxels in S0 F-statistic maps")
         comptable["countsigFS0"] = dependence.compute_countsignal(
-            metric_maps["map FS0 clusterized"]
+            stat_cl_maps=metric_maps["map FS0 clusterized"],
         )
 
     # Back to maps
     if "map beta T2 clusterized" in required_metrics:
         LGR.info("Thresholding optimal combination beta maps to match T2* F-statistic maps")
         metric_maps["map beta T2 clusterized"] = dependence.threshold_to_match(
-            metric_maps["map optcom betas"], comptable["countsigFT2"], mask, ref_img
+            maps=metric_maps["map optcom betas"],
+            n_sig_voxels=comptable["countsigFT2"],
+            mask=mask,
+            ref_img=ref_img,
         )
 
     if "map beta S0 clusterized" in required_metrics:
         LGR.info("Thresholding optimal combination beta maps to match S0 F-statistic maps")
         metric_maps["map beta S0 clusterized"] = dependence.threshold_to_match(
-            metric_maps["map optcom betas"], comptable["countsigFS0"], mask, ref_img
+            maps=metric_maps["map optcom betas"],
+            n_sig_voxels=comptable["countsigFS0"],
+            mask=mask,
+            ref_img=ref_img,
         )
 
     # Dependence metrics
@@ -229,24 +258,23 @@ def generate_metrics(
     if "variance explained" in required_metrics:
         LGR.info("Calculating variance explained")
         comptable["variance explained"] = dependence.calculate_varex(
-            metric_maps["map optcom betas"]
+            optcom_betas=metric_maps["map optcom betas"],
         )
 
     if "normalized variance explained" in required_metrics:
         LGR.info("Calculating normalized variance explained")
         comptable["normalized variance explained"] = dependence.calculate_varex_norm(
-            metric_maps["map weight"]
+            weights=metric_maps["map weight"],
         )
 
     # Spatial metrics
     if "dice_FT2" in required_metrics:
         LGR.info(
-            "Calculating DSI between thresholded T2* F-statistic and "
-            "optimal combination beta maps"
+            "Calculating DSI between thresholded T2* F-statistic and optimal combination beta maps"
         )
         comptable["dice_FT2"] = dependence.compute_dice(
-            metric_maps["map beta T2 clusterized"],
-            metric_maps["map FT2 clusterized"],
+            clmaps1=metric_maps["map beta T2 clusterized"],
+            clmaps2=metric_maps["map FT2 clusterized"],
             axis=0,
         )
 
     if "dice_FS0" in required_metrics:
         LGR.info(
             "Calculating DSI between thresholded S0 F-statistic and "
             "optimal combination beta maps"
         )
         comptable["dice_FS0"] = dependence.compute_dice(
-            metric_maps["map beta S0 clusterized"],
clusterized"], - metric_maps["map FS0 clusterized"], + clmaps1=metric_maps["map beta S0 clusterized"], + clmaps2=metric_maps["map FS0 clusterized"], axis=0, ) if "signal-noise_t" in required_metrics: LGR.info("Calculating signal-noise t-statistics") RepLGR.info( - "A t-test was performed between the distributions of T2*-model " - "F-statistics associated with clusters (i.e., signal) and " - "non-cluster voxels (i.e., noise) to generate a t-statistic " - "(metric signal-noise_z) and p-value (metric signal-noise_p) " - "measuring relative association of the component to signal " - "over noise." + "A t-test was performed between the distributions of T2*-model F-statistics " + "associated with clusters (i.e., signal) and non-cluster voxels (i.e., noise) to " + "generate a t-statistic (metric signal-noise_z) and p-value (metric signal-noise_p) " + "measuring relative association of the component to signal over noise." ) ( comptable["signal-noise_t"], @@ -283,20 +309,18 @@ def generate_metrics( if "signal-noise_z" in required_metrics: LGR.info("Calculating signal-noise z-statistics") RepLGR.info( - "A t-test was performed between the distributions of T2*-model " - "F-statistics associated with clusters (i.e., signal) and " - "non-cluster voxels (i.e., noise) to generate a z-statistic " - "(metric signal-noise_z) and p-value (metric signal-noise_p) " - "measuring relative association of the component to signal " - "over noise." + "A t-test was performed between the distributions of T2*-model F-statistics " + "associated with clusters (i.e., signal) and non-cluster voxels (i.e., noise) to " + "generate a z-statistic (metric signal-noise_z) and p-value (metric signal-noise_p) " + "measuring relative association of the component to signal over noise." ) ( comptable["signal-noise_z"], comptable["signal-noise_p"], ) = dependence.compute_signal_minus_noise_z( - Z_maps=metric_maps["map Z"], - Z_clmaps=metric_maps["map Z clusterized"], - F_T2_maps=metric_maps["map FT2"], + z_maps=metric_maps["map Z"], + z_clmaps=metric_maps["map Z clusterized"], + f_t2_maps=metric_maps["map FT2"], ) if "countnoise" in required_metrics: @@ -306,18 +330,19 @@ def generate_metrics( "calculated for each component." 
         )
         comptable["countnoise"] = dependence.compute_countnoise(
-            metric_maps["map Z"], metric_maps["map Z clusterized"]
+            stat_maps=metric_maps["map Z"],
+            stat_cl_maps=metric_maps["map Z clusterized"],
         )
 
     # Composite metrics
     if "d_table_score" in required_metrics:
         LGR.info("Calculating decision table score")
         comptable["d_table_score"] = dependence.generate_decision_table_score(
-            comptable["kappa"],
-            comptable["dice_FT2"],
-            comptable["signal-noise_t"],
-            comptable["countnoise"],
-            comptable["countsigFT2"],
+            kappa=comptable["kappa"],
+            dice_ft2=comptable["dice_FT2"],
+            signal_minus_noise_t=comptable["signal-noise_t"],
+            countnoise=comptable["countnoise"],
+            countsig_ft2=comptable["countsigFT2"],
         )
 
     # Write verbose metrics if needed
@@ -326,6 +351,7 @@ def generate_metrics(
         write_t2s0 = "map predicted T2" in metric_maps
         if write_betas:
             betas = metric_maps["map echo betas"]
+
         if write_t2s0:
             pred_t2_maps = metric_maps["map predicted T2"]
             pred_s0_maps = metric_maps["map predicted S0"]
@@ -335,7 +361,7 @@ def generate_metrics(
             echo_betas = betas[:, i_echo, :]
             io_generator.save_file(
                 utils.unmask(echo_betas, mask),
-                "echo weight " + label + " map split img",
+                f"echo weight {label} map split img",
                 echo=(i_echo + 1),
             )
 
@@ -343,14 +369,14 @@ def generate_metrics(
             echo_pred_t2_maps = pred_t2_maps[:, i_echo, :]
             io_generator.save_file(
                 utils.unmask(echo_pred_t2_maps, mask),
-                "echo T2 " + label + " split img",
+                f"echo T2 {label} split img",
                 echo=(i_echo + 1),
             )
 
             echo_pred_s0_maps = pred_s0_maps[:, i_echo, :]
             io_generator.save_file(
                 utils.unmask(echo_pred_s0_maps, mask),
-                "echo S0 " + label + " split img",
+                f"echo S0 {label} split img",
                 echo=(i_echo + 1),
             )
 
diff --git a/tedana/metrics/dependence.py b/tedana/metrics/dependence.py
index 8e854e1ff..5edd581d9 100644
--- a/tedana/metrics/dependence.py
+++ b/tedana/metrics/dependence.py
@@ -50,14 +50,14 @@ def calculate_betas(
 
     Parameters
     ----------
-    data : (M x [E] x T) array_like
+    data : (M [x E] x T) array_like
         Data to calculate betas for
     mixing : (T x C) array_like
         Mixing matrix
 
     Returns
     -------
-    betas : (M x [E] x C) array_like
+    betas : (M [x E] x C) array_like
         Unstandardized parameter estimates
     """
     if len(data.shape) == 2:

From 8f62c6ba1f398c9517a4ed67f690c78b138b3e61 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Mon, 22 Apr 2024 11:56:12 -0400
Subject: [PATCH 3/6] Update tests.

---
 tedana/tests/test_metrics.py | 65 +++++++++++++++++++++++++++---------
 1 file changed, 50 insertions(+), 15 deletions(-)

diff --git a/tedana/tests/test_metrics.py b/tedana/tests/test_metrics.py
index dfcd5ee2f..53a4ec19f 100644
--- a/tedana/tests/test_metrics.py
+++ b/tedana/tests/test_metrics.py
@@ -70,7 +70,7 @@ def test_smoke_calculate_weights():
     n_voxels, n_volumes, n_components = 1000, 100, 50
     data_optcom = np.random.random((n_voxels, n_volumes))
     mixing = np.random.random((n_volumes, n_components))
-    weights = dependence.calculate_weights(data_optcom, mixing)
+    weights = dependence.calculate_weights(data_optcom=data_optcom, mixing=mixing)
     assert weights.shape == (n_voxels, n_components)
 
 
@@ -79,7 +79,7 @@ def test_smoke_calculate_betas():
     n_voxels, n_volumes, n_components = 1000, 100, 50
     data_optcom = np.random.random((n_voxels, n_volumes))
     mixing = np.random.random((n_volumes, n_components))
-    betas = dependence.calculate_betas(data_optcom, mixing)
+    betas = dependence.calculate_betas(data=data_optcom, mixing=mixing)
     assert betas.shape == (n_voxels, n_components)
 
 
@@ -88,7 +88,7 @@ def test_smoke_calculate_psc():
     n_voxels, n_volumes, n_components = 1000, 100, 50
     data_optcom = np.random.random((n_voxels, n_volumes))
     optcom_betas = np.random.random((n_voxels, n_components))
-    psc = dependence.calculate_psc(data_optcom, optcom_betas)
+    psc = dependence.calculate_psc(data_optcom=data_optcom, optcom_betas=optcom_betas)
     assert psc.shape == (n_voxels, n_components)
 
 
@@ -96,7 +96,7 @@ def test_smoke_calculate_z_maps():
     """Smoke test for tedana.metrics.dependence.calculate_z_maps."""
     n_voxels, n_components = 1000, 50
     weights = np.random.random((n_voxels, n_components))
-    z_maps = dependence.calculate_z_maps(weights, z_max=4)
+    z_maps = dependence.calculate_z_maps(weights=weights, z_max=4)
     assert z_maps.shape == (n_voxels, n_components)
 
 
@@ -109,7 +109,12 @@ def test_smoke_calculate_f_maps():
     adaptive_mask = np.random.randint(1, n_echos + 1, size=n_voxels)
     tes = np.array([15, 25, 35, 45, 55])
     f_t2_maps, f_s0_maps, _, _ = dependence.calculate_f_maps(
-        data_cat, z_maps, mixing, adaptive_mask, tes, f_max=500
+        data_cat=data_cat,
+        z_maps=z_maps,
+        mixing=mixing,
+        adaptive_mask=adaptive_mask,
+        tes=tes,
+        f_max=500,
     )
     assert f_t2_maps.shape == f_s0_maps.shape == (n_voxels, n_components)
 
 
@@ -118,7 +123,7 @@ def test_smoke_calculate_varex():
     """Smoke test for tedana.metrics.dependence.calculate_varex."""
     n_voxels, n_components = 1000, 50
     optcom_betas = np.random.random((n_voxels, n_components))
-    varex = dependence.calculate_varex(optcom_betas)
+    varex = dependence.calculate_varex(optcom_betas=optcom_betas)
     assert varex.shape == (n_components,)
 
 
@@ -126,7 +131,7 @@ def test_smoke_calculate_varex_norm():
     """Smoke test for tedana.metrics.dependence.calculate_varex_norm."""
     n_voxels, n_components = 1000, 50
     weights = np.random.random((n_voxels, n_components))
-    varex_norm = dependence.calculate_varex_norm(weights)
+    varex_norm = dependence.calculate_varex_norm(weights=weights)
     assert varex_norm.shape == (n_components,)
 
 
@@ -135,9 +140,17 @@ def test_smoke_compute_dice():
     n_voxels, n_components = 1000, 50
     clmaps1 = np.random.randint(0, 2, size=(n_voxels, n_components))
     clmaps2 = np.random.randint(0, 2, size=(n_voxels, n_components))
-    dice = dependence.compute_dice(clmaps1, clmaps2, axis=0)
+    dice = dependence.compute_dice(
+        clmaps1=clmaps1,
+        clmaps2=clmaps2,
+        axis=0,
+    )
     assert dice.shape == (n_components,)
-    dice = dependence.compute_dice(clmaps1, clmaps2, axis=1)
+    dice = dependence.compute_dice(
+        clmaps1=clmaps1,
+        clmaps2=clmaps2,
+        axis=1,
+    )
     assert dice.shape == (n_voxels,)
 
 
@@ -150,7 +163,12 @@ def test_smoke_compute_signal_minus_noise_z():
     (
         signal_minus_noise_z,
         signal_minus_noise_p,
-    ) = dependence.compute_signal_minus_noise_z(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95)
+    ) = dependence.compute_signal_minus_noise_z(
+        z_maps=z_maps,
+        z_clmaps=z_clmaps,
+        f_t2_maps=f_t2_maps,
+        z_thresh=1.95,
+    )
     assert signal_minus_noise_z.shape == signal_minus_noise_p.shape == (n_components,)
 
 
@@ -163,7 +181,12 @@ def test_smoke_compute_signal_minus_noise_t():
     (
         signal_minus_noise_t,
         signal_minus_noise_p,
-    ) = dependence.compute_signal_minus_noise_t(z_maps, z_clmaps, f_t2_maps, z_thresh=1.95)
+    ) = dependence.compute_signal_minus_noise_t(
+        z_maps=z_maps,
+        z_clmaps=z_clmaps,
+        f_t2_maps=f_t2_maps,
+        z_thresh=1.95,
+    )
     assert signal_minus_noise_t.shape == signal_minus_noise_p.shape == (n_components,)
 
 
@@ -171,7 +194,7 @@ def test_smoke_compute_countsignal():
     """Smoke test for tedana.metrics.dependence.compute_countsignal."""
     n_voxels, n_components = 1000, 50
     stat_cl_maps = np.random.randint(0, 2, size=(n_voxels, n_components))
-    countsignal = dependence.compute_countsignal(stat_cl_maps)
+    countsignal = dependence.compute_countsignal(stat_cl_maps=stat_cl_maps)
     assert countsignal.shape == (n_components,)
 
 
@@ -180,7 +203,11 @@ def test_smoke_compute_countnoise():
     n_voxels, n_components = 1000, 50
     stat_maps = np.random.normal(size=(n_voxels, n_components))
     stat_cl_maps = np.random.randint(0, 2, size=(n_voxels, n_components))
-    countnoise = dependence.compute_countnoise(stat_maps, stat_cl_maps, stat_thresh=1.95)
+    countnoise = dependence.compute_countnoise(
+        stat_maps=stat_maps,
+        stat_cl_maps=stat_cl_maps,
+        stat_thresh=1.95,
+    )
     assert countnoise.shape == (n_components,)
 
 
@@ -193,7 +220,11 @@ def test_smoke_generate_decision_table_score():
     countnoise = np.random.randint(0, n_voxels, size=n_components)
     countsigft2 = np.random.randint(0, n_voxels, size=n_components)
     decision_table_score = dependence.generate_decision_table_score(
-        kappa, dice_ft2, signal_minus_noise_t, countnoise, countsigft2
+        kappa=kappa,
+        dice_ft2=dice_ft2,
+        signal_minus_noise_t=signal_minus_noise_t,
+        countnoise=countnoise,
+        countsigft2=countsigft2,
     )
     assert decision_table_score.shape == (n_components,)
 
 
@@ -204,5 +235,9 @@ def test_smoke_calculate_dependence_metrics():
     f_t2_maps = np.random.random((n_voxels, n_components))
     f_s0_maps = np.random.random((n_voxels, n_components))
     z_maps = np.random.random((n_voxels, n_components))
-    kappas, rhos = dependence.calculate_dependence_metrics(f_t2_maps, f_s0_maps, z_maps)
+    kappas, rhos = dependence.calculate_dependence_metrics(
+        f_t2_maps=f_t2_maps,
+        f_s0_maps=f_s0_maps,
+        z_maps=z_maps,
+    )
     assert kappas.shape == rhos.shape == (n_components,)

From 0fdb81d5bffebd18f2ba37f5968ef9c8157269cb Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Mon, 22 Apr 2024 12:11:34 -0400
Subject: [PATCH 4/6] Update dependence.py

---
 tedana/metrics/dependence.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tedana/metrics/dependence.py b/tedana/metrics/dependence.py
index 5edd581d9..575af98b3 100644
--- a/tedana/metrics/dependence.py
+++ b/tedana/metrics/dependence.py
@@ -137,7 +137,7 @@ def calculate_f_maps(
     adaptive_mask: np.ndarray,
     tes: np.ndarray,
     f_max: float = 500,
-) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
     """Calculate pseudo-F-statistic maps for TE-dependence and -independence models.
 
     Parameters
@@ -351,7 +351,7 @@ def calculate_dependence_metrics(
     f_t2_maps: np.ndarray,
     f_s0_maps: np.ndarray,
     z_maps: np.ndarray,
-) -> tuple[np.ndarray, np.ndarray]:
+) -> typing.Tuple[np.ndarray, np.ndarray]:
     """Calculate Kappa and Rho metrics from F-statistic maps.
 
     Just a weighted average over voxels.
@@ -464,7 +464,7 @@ def compute_signal_minus_noise_z(
     z_clmaps: np.ndarray,
     f_t2_maps: np.ndarray,
     z_thresh: float = 1.95,
-) -> tuple[np.ndarray, np.ndarray]:
+) -> typing.Tuple[np.ndarray, np.ndarray]:
     """Compare signal and noise z-statistic distributions with a two-sample t-test.
 
     Divide voxel-level thresholded F-statistic maps into distributions of
@@ -532,7 +532,7 @@ def compute_signal_minus_noise_t(
     z_clmaps: np.ndarray,
     f_t2_maps: np.ndarray,
     z_thresh: float = 1.95,
-) -> tuple[np.ndarray, np.ndarray]:
+) -> typing.Tuple[np.ndarray, np.ndarray]:
     """Compare signal and noise t-statistic distributions with a two-sample t-test.
 
     Divide voxel-level thresholded F-statistic maps into distributions of

From 288db78f2b10e4f2b8826b99b55de443d5b50fb2 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Mon, 22 Apr 2024 12:15:18 -0400
Subject: [PATCH 5/6] Update collect.py

---
 tedana/metrics/collect.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py
index c748c4c6c..6868df5fc 100644
--- a/tedana/metrics/collect.py
+++ b/tedana/metrics/collect.py
@@ -190,7 +190,7 @@ def generate_metrics(
             maps=metric_maps["map Z"],
             mask=mask,
             ref_img=ref_img,
-            z_thresh=z_thresh,
+            threshold=z_thresh,
         )
 
     if "map FT2 clusterized" in required_metrics:
@@ -200,7 +200,7 @@ def generate_metrics(
             maps=metric_maps["map FT2"],
             mask=mask,
             ref_img=ref_img,
-            f_thresh=f_thresh,
+            threshold=f_thresh,
         )
 
     if "map FS0 clusterized" in required_metrics:
@@ -210,7 +210,7 @@ def generate_metrics(
             maps=metric_maps["map FS0"],
             mask=mask,
             ref_img=ref_img,
-            f_thresh=f_thresh,
+            threshold=f_thresh,
         )
 
     # Intermediate metrics

From 9a1d536672e1145f0a30ab400fb1415ff5211b24 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Mon, 22 Apr 2024 12:17:01 -0400
Subject: [PATCH 6/6] Fix other stuff.

---
 tedana/selection/selection_nodes.py | 6 +++++-
 tedana/tests/test_metrics.py        | 4 ++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py
index 5fed5ddde..3cb11ccea 100644
--- a/tedana/selection/selection_nodes.py
+++ b/tedana/selection/selection_nodes.py
@@ -1812,7 +1812,11 @@ def calc_revised_meanmetricrank_guesses(
     tmp_countnoise = selector.component_table_.loc[comps2use, "countnoise"].to_numpy()
     tmp_countsig_ft2 = selector.component_table_.loc[comps2use, "countsigFT2"].to_numpy()
     tmp_d_table_score = generate_decision_table_score(
-        tmp_kappa, tmp_dice_ft2, tmp_signal_m_noise_t, tmp_countnoise, tmp_countsig_ft2
+        kappa=tmp_kappa,
+        dice_ft2=tmp_dice_ft2,
+        signal_minus_noise_t=tmp_signal_m_noise_t,
+        countnoise=tmp_countnoise,
+        countsig_ft2=tmp_countsig_ft2,
     )
     selector.component_table_[f"d_table_score_node{selector.current_node_idx_}"] = np.NaN
     selector.component_table_.loc[

diff --git a/tedana/tests/test_metrics.py b/tedana/tests/test_metrics.py
index 53a4ec19f..9d99909cd 100644
--- a/tedana/tests/test_metrics.py
+++ b/tedana/tests/test_metrics.py
@@ -218,13 +218,13 @@ def test_smoke_generate_decision_table_score():
     dice_ft2 = np.random.random(n_components)
     signal_minus_noise_t = np.random.normal(size=n_components)
     countnoise = np.random.randint(0, n_voxels, size=n_components)
-    countsigft2 = np.random.randint(0, n_voxels, size=n_components)
+    countsig_ft2 = np.random.randint(0, n_voxels, size=n_components)
     decision_table_score = dependence.generate_decision_table_score(
         kappa=kappa,
         dice_ft2=dice_ft2,
         signal_minus_noise_t=signal_minus_noise_t,
         countnoise=countnoise,
-        countsigft2=countsigft2,
+        countsig_ft2=countsig_ft2,
    )
     assert decision_table_score.shape == (n_components,)
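Usage sketch (illustrative; not part of the patch series): after these commits, every function in tedana.metrics.dependence takes keyword-only arguments, so positional calls raise a TypeError. The snippet below is a minimal example assuming random placeholder inputs, with shapes borrowed from the smoke tests above; it is not the project's documented API example.

    import numpy as np

    from tedana.metrics import dependence

    # Shapes follow the smoke tests: (voxels x time) data and (time x components) mixing.
    n_voxels, n_volumes, n_components = 1000, 100, 50
    data_optcom = np.random.random((n_voxels, n_volumes))
    mixing = np.random.random((n_volumes, n_components))

    # Keyword-only calls, as required by the new signatures.
    weights = dependence.calculate_weights(data_optcom=data_optcom, mixing=mixing)
    z_maps = dependence.calculate_z_maps(weights=weights, z_max=8)

    # A positional call such as dependence.calculate_weights(data_optcom, mixing)
    # now raises TypeError, because of the bare `*` added to each signature.

The bare `*` is why PATCH 2 has to touch every call site in collect.py at once, and why the mismatched `z_thresh=`/`f_thresh=` keywords introduced there fail loudly until PATCH 5 renames them to `threshold=`.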