Commit

minor edits
OnnoKampman committed Apr 21, 2024
1 parent 90af94d commit 9c82b0d
Showing 5 changed files with 67 additions and 20 deletions.
@@ -106,7 +106,7 @@ def _get_performance_metric(
for perform_metric in cfg['performance-metrics']:
performance_df = pd.DataFrame(
index=models_list,
columns=cfg['all-covs-types']
columns=cfg['all-covs-types'],
)
for covs_type in cfg['all-covs-types']:

@@ -115,16 +115,19 @@ def _get_performance_metric(
f'{covs_type:s}_covariance.csv'
)
if not os.path.exists(data_file):
logging.warning(f"Data file '{data_file:s}' not found.")

# Fix renaming issue.
if covs_type == 'boxcar':
data_file = os.path.join(
cfg['data-dir'], noise_type, f'trial_{i_trial:03d}',
'checkerboard_covariance.csv'
)

if not os.path.exists(data_file):
logging.warning(f"Data file '{data_file:s}' not found.")
if not os.path.exists(data_file):
logging.warning(f"Data file '{data_file:s}' not found.")
performance_df.loc[:, covs_type] = np.nan
continue
else:
performance_df.loc[:, covs_type] = np.nan
continue

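Taken together, the hunks in this commit add the same missing-file fallback to several scripts: when a '{covs_type}_covariance.csv' file is absent and covs_type is 'boxcar', the script retries with the older 'checkerboard_covariance.csv' filename before giving up. A minimal sketch of that pattern as a standalone helper, assuming the post-commit control flow matches the added lines visible in plot_TVFC_estimates.py and train_WP.py below; resolve_covariance_file() is a hypothetical name, not part of the repository:

import logging
import os


def resolve_covariance_file(data_dir, noise_type, i_trial, covs_type):
    """Return the covariance CSV path for this trial, falling back to the
    legacy 'checkerboard' filename when the 'boxcar' file is missing.
    Returns None if no usable file is found. (Hypothetical helper, for illustration.)
    """
    data_file = os.path.join(
        data_dir, noise_type, f'trial_{i_trial:03d}',
        f'{covs_type:s}_covariance.csv'
    )
    if not os.path.exists(data_file):
        logging.warning(f"Data file '{data_file:s}' not found.")
        if covs_type == 'boxcar':
            # Fix renaming issue: older simulations saved this covariance
            # structure under the 'checkerboard' name.
            data_file = os.path.join(
                data_dir, noise_type, f'trial_{i_trial:03d}',
                'checkerboard_covariance.csv'
            )
            if not os.path.exists(data_file):
                logging.warning(f"Data file '{data_file:s}' not found.")
                return None
        else:
            return None
    return data_file

In _get_performance_metric above, the equivalent of the None case fills the column with NaN (performance_df.loc[:, covs_type] = np.nan) before moving on to the next covariance type.
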
13 changes: 13 additions & 0 deletions benchmarks/fmri/sim/plotters/plot_TVFC_estimates.py
@@ -59,6 +59,19 @@ def plot_d2_all_covariance_structures(
config_dict['data-dir'], time_series_noise_type,
f'trial_{i_trial:03d}', f'{covs_type:s}_covariance.csv'
)
if not os.path.exists(data_file):
logging.warning(f"Data file '{data_file:s}' not found.")
if covs_type == 'boxcar':
data_file = os.path.join(
config_dict['data-dir'], time_series_noise_type,
f'trial_{i_trial:03d}', 'checkerboard_covariance.csv'
)
if not os.path.exists(data_file):
logging.warning(f"Data file '{data_file:s}' not found.")
continue
else:
continue

x, y = load_data(
data_file,
verbose=False,
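
If the hypothetical resolve_covariance_file() helper sketched earlier were adopted, the block added to this plotting loop would reduce to something like the following (variable names follow the diff; the script does not currently do this):

data_file = resolve_covariance_file(
    config_dict['data-dir'], time_series_noise_type, i_trial, covs_type
)
if data_file is None:
    continue

x, y = load_data(
    data_file,
    verbose=False,
)
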
17 changes: 11 additions & 6 deletions benchmarks/fmri/sim/train_models/train_SW_cross_validated.py
@@ -20,7 +20,7 @@

data_set_name = sys.argv[1] # 'd2', 'd3d', 'd{%d}s'
data_split = sys.argv[2] # 'all', 'LEOO'
experiment_data = sys.argv[3] # e.g. 'N0200_T0100'
experiment_data = sys.argv[3] # 'Nxxxx_Txxxx'

cfg = get_config_dict(
data_set_name=data_set_name,
@@ -83,6 +83,7 @@
logging.warning(f"Data file {data_file:s} not found.")
continue
continue

x, y = load_data(
data_file,
verbose=False,
@@ -107,14 +108,14 @@

for metric in ['correlation', 'covariance']:
tvfc_estimates_savedir = os.path.join(
cfg['experiments-basedir'], noise_type, f'trial_{i_trial:03d}', 'TVFC_estimates',
data_split, metric, model_name
cfg['experiments-basedir'], noise_type, f'trial_{i_trial:03d}',
'TVFC_estimates', data_split, metric, model_name
)
m.save_tvfc_estimates(
optimal_window_length=optimal_window_length,
savedir=tvfc_estimates_savedir,
model_name=f'{covs_type:s}.csv',
connectivity_metric=metric
connectivity_metric=metric,
)

optimal_window_length_array.append(optimal_window_length)
@@ -123,10 +124,14 @@
optimal_window_length_df[covs_type] = optimal_window_length_array # (n_trials, )

optimal_window_length_filename = 'optimal_window_lengths.csv'
optimal_window_length_savedir = os.path.join(cfg['git-results-basedir'], noise_type, data_split)
optimal_window_length_savedir = os.path.join(
cfg['git-results-basedir'], noise_type, data_split
)
if not os.path.exists(optimal_window_length_savedir):
os.makedirs(optimal_window_length_savedir)
optimal_window_length_df.to_csv(
os.path.join(optimal_window_length_savedir, optimal_window_length_filename)
)
logging.info(f"Saved optimal window lengths '{optimal_window_length_filename:s}' in '{optimal_window_length_savedir:s}'.")
logging.info(
f"Saved optimal window lengths '{optimal_window_length_filename:s}' in '{optimal_window_length_savedir:s}'."
)
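
The directory-creation step above pairs an explicit os.path.exists() check with os.makedirs(); the same effect can be had in one call with exist_ok=True. A minimal sketch with placeholder values (the DataFrame contents and directory names below are assumptions, not taken from the experiments):

import os

import pandas as pd

# Placeholder results; in the script this is optimal_window_length_df.
optimal_window_length_df = pd.DataFrame({'boxcar': [30, 45, 60]})

savedir = os.path.join('git-results', 'no_noise', 'all')  # assumed layout
os.makedirs(savedir, exist_ok=True)  # creates the directory only if it is missing
optimal_window_length_df.to_csv(os.path.join(savedir, 'optimal_window_lengths.csv'))
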
18 changes: 14 additions & 4 deletions benchmarks/fmri/sim/train_models/train_WP.py
@@ -29,7 +29,7 @@
experiment_data=experiment_data,
hostname=hostname
)
n_trials = int(experiment_data[-4:])
num_trials = int(experiment_data[-4:])
assert os.path.exists(os.path.join(cfg['data-dir']))

# Allow for local and CPU cluster training.
@@ -49,12 +49,12 @@
noise_types = [noise_type]
covs_types = [covs_type]
except KeyError:
i_trials = range(n_trials)
i_trials = range(num_trials)
noise_types = cfg['noise-types']
covs_types = cfg['all-covs-types']
else:
print('Running locally...')
i_trials = range(n_trials)
i_trials = range(num_trials)
noise_types = cfg['noise-types']
covs_types = cfg['all-covs-types']
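
The comment "Allow for local and CPU cluster training." refers to the try/except KeyError pattern visible here: presumably an array-job index is read from an environment variable on the cluster, and the KeyError raised when that variable is unset falls back to looping over everything locally. A minimal sketch of that dispatch; 'TVFC_TASK_ID' is a hypothetical variable name (batch schedulers typically expose something like SGE_TASK_ID or SLURM_ARRAY_TASK_ID), and the placeholder value of num_trials stands in for the value parsed from experiment_data:

import os

num_trials = 200  # placeholder; the script parses this from experiment_data

try:
    # On the cluster, each array task handles a single trial.
    i_trial = int(os.environ['TVFC_TASK_ID']) - 1  # schedulers usually count from 1
    i_trials = [i_trial]
except KeyError:
    # Locally the variable is unset, so loop over all trials instead.
    i_trials = range(num_trials)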

@@ -68,6 +68,7 @@
print(f'Trial {i_trial}')
print(f'covs_type {covs_type:s}')
print(f'noise_type {noise_type:s}', '\n----------\n')

data_file = os.path.join(
cfg['data-dir'], noise_type, f'trial_{i_trial:03d}',
f'{covs_type:s}_covariance.csv'
@@ -78,6 +79,12 @@
cfg['data-dir'], noise_type, f'trial_{i_trial:03d}',
'checkerboard_covariance.csv'
)
if not os.path.exists(data_file):
logging.warning(f"Data file '{data_file:s}' not found.")
continue
else:
logging.warning(f"Data file '{data_file:s}' not found.")
continue

# Check if model already exists.
model_savedir = os.path.join(
@@ -94,7 +101,10 @@
f'trial_{i_trial:03d}', model_name, covs_type
)

x, y = load_data(data_file, verbose=False) # (N, 1), (N, D)
x, y = load_data(
data_file,
verbose=False,
) # (N, 1), (N, D)
n_time_series = y.shape[1]

match data_split:
28 changes: 22 additions & 6 deletions benchmarks/fmri/sim/train_models/train_mgarch.py
@@ -54,22 +54,31 @@
covs_types = cfg['all-covs-types']

for noise_type in noise_types:

for covs_type in covs_types:

for i_trial in i_trials:
print('\n----------')
print(f'Trial {i_trial:d}')
print('covs_type ', covs_type)
print('noise_type', noise_type, '\n----------\n')

data_file = os.path.join(
cfg['data-dir'], noise_type, f'trial_{i_trial:03d}',
f'{covs_type:s}_covariance.csv'
)
if not os.path.exists(data_file):
logging.warning(f"Data file {data_file:s} not found.")
if covs_type == 'boxcar':
data_file = os.path.join(
cfg['data-dir'], noise_type, f'trial_{i_trial:03d}',
'checkerboard_covariance.csv'
)
if not os.path.exists(data_file):
logging.warning(f"Data file {data_file:s} not found.")
continue
else:
continue
x, y = load_data(
data_file,
verbose=False
@@ -93,17 +102,24 @@
for metric in ['correlation', 'covariance']:

tvfc_estimates_savedir = os.path.join(
cfg['experiments-basedir'], noise_type, f'trial_{i_trial:03d}', 'TVFC_estimates',
data_split, metric, f'{model_name:s}_{training_type:s}'
cfg['experiments-basedir'], noise_type, f'trial_{i_trial:03d}',
'TVFC_estimates', data_split, metric, f'{model_name:s}_{training_type:s}'
)
tvfc_estimates_savepath = os.path.join(
tvfc_estimates_savedir, f"{covs_type:s}.csv"
)
tvfc_estimates_savepath = os.path.join(tvfc_estimates_savedir, f"{covs_type:s}.csv")
if not os.path.exists(tvfc_estimates_savepath):
m = MGARCH(mgarch_type=model_name)
m.fit_model(training_data_df=pd.DataFrame(y_train), training_type=training_type)
m.fit_model(
training_data_df=pd.DataFrame(y_train),
training_type=training_type,
)
m.save_tvfc_estimates(
savedir=tvfc_estimates_savedir,
model_name=f'{covs_type:s}.csv',
connectivity_metric=metric
connectivity_metric=metric,
)
else:
logging.info(f"Skipping training: existing model found in '{tvfc_estimates_savedir:s}'.")
logging.info(
f"Skipping training: Existing model found in '{tvfc_estimates_savedir:s}'."
)
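
For reference, the reformatted os.path.join() call above builds the TVFC-estimates path out of the experiments directory, noise type, trial, data split, connectivity metric, and model identifier. A small sketch with assumed example values (the specific names below are illustrative, not taken from the experiments):

import os

# Assumed example values, for illustration only.
experiments_basedir = 'experiments'
noise_type = 'no_noise'
i_trial = 4
data_split = 'all'
metric = 'correlation'
model_name = 'DCC'
training_type = 'joint'
covs_type = 'boxcar'

tvfc_estimates_savedir = os.path.join(
    experiments_basedir, noise_type, f'trial_{i_trial:03d}',
    'TVFC_estimates', data_split, metric, f'{model_name:s}_{training_type:s}'
)
tvfc_estimates_savepath = os.path.join(tvfc_estimates_savedir, f'{covs_type:s}.csv')
print(tvfc_estimates_savepath)
# On a POSIX system this prints:
# experiments/no_noise/trial_004/TVFC_estimates/all/correlation/DCC_joint/boxcar.csv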
