-
Notifications
You must be signed in to change notification settings - Fork 10
/
main.py
145 lines (117 loc) · 5.2 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import pandas as pd
import numpy as np
import wandb
import sys
import re
import test_and_eval
import data_handler
import arg_parser
import helpers
import models
import train
# Prettyprint for dataframes with long values (filenames).
# Use the fully-qualified option name: the un-prefixed 'max_colwidth'
# was deprecated in pandas 1.0 and removed in pandas 2.x.
pd.set_option('display.max_colwidth', 800)
def run():
    """Run the full pipeline: prepare data, train (or load) a model,
    optionally save features, and evaluate on the test set.

    Reads module-level globals set in the ``__main__`` block:
    ``config_dict``, ``args``, ``all_subjects_df``, ``train_subjects``,
    ``val_subjects`` and ``test_subjects``.
    """
    # DataHandler wraps all dataframe/dataset bookkeeping for the chosen
    # label column.
    dh = data_handler.DataHandler(data_columns=['pain'], # or e.g., 'observer',
                                  config_dict=config_dict,
                                  all_subjects_df=all_subjects_df)
    if config_dict['get_raw_sequence_data']:
        # Per-split sequence dataframes; the test split is rounded so that
        # it contains a whole number of batches.
        train_sequence_dfs, val_sequence_dfs, test_sequence_dfs = dh.get_data_indices(args)
        test_sequence_dfs = dh.round_to_batch_size(test_sequence_dfs)
        train_dataset, val_dataset, test_dataset = dh.get_datasets(
            df_train=train_sequence_dfs,
            df_val=val_sequence_dfs,
            df_test=test_sequence_dfs)
        # Steps per epoch = number of sequences // batch size.
        train_steps = int(len(train_sequence_dfs)/config_dict['batch_size'])
        test_steps = int(len(test_sequence_dfs)/config_dict['batch_size'])
        test_labels, test_paths = dh.get_y_batches_paths_from_dfs(test_sequence_dfs)
        if config_dict['val_mode'] == 'no_val':
            val_steps = 0
        else:
            val_steps = int(len(val_sequence_dfs)/config_dict['batch_size'])
        if args.test_run == 1:
            # Smoke-test mode: one epoch and a handful of steps.
            config_dict['nb_epochs'] = 1
            train_steps = 2
            val_steps = 2
            test_steps = 40
            # NOTE(review): labels are truncated to test_steps but paths to
            # test_steps * batch_size — presumably labels are already
            # batched; confirm against get_y_batches_paths_from_dfs.
            test_labels = test_labels[:test_steps]
            test_paths = test_paths[:test_steps*config_dict['batch_size']]
    # Train the model
    model = models.MyModel(config_dict=config_dict)
    if config_dict['inference_only']:
        # Skip training and evaluate a previously saved checkpoint.
        best_model_path = config_dict['checkpoint']
    else:
        best_model_path = train.train(model_instance=model,
                                      config_dict=config_dict,
                                      train_steps=train_steps,
                                      val_steps=val_steps,
                                      train_dataset=train_dataset,
                                      val_dataset=val_dataset)
    if config_dict['save_features']:
        # Rebuild the test dataset without training-time behavior
        # (train=False) before dumping features.
        test_dataset = dh.get_dataset(test_sequence_dfs, train=False)
        train.save_features(model.model, config_dict,
                            steps=test_steps, dataset=test_dataset)
    if config_dict['save_features_per_video']:
        # Load pre-extracted clip features and regroup them per video.
        features = np.load(config_dict['clip_features_path'], allow_pickle=True)
        dh.prepare_video_features(features, zero_pad=config_dict['zero_pad_video_features'])
    if config_dict['train_video_level_features']:
        # NOTE(review): val_subjects is only bound in __main__ when
        # val_mode == 'subject'; other val modes (except 'no_val') would
        # raise NameError here — confirm intended config combinations.
        train_dataset = dh.features_to_dataset(train_subjects, split='train')
        if not config_dict['val_mode'] == 'no_val':
            val_dataset = dh.features_to_dataset(val_subjects, split='val')
        else:
            val_dataset = None
        print('Training on loaded features...')
        # samples = [sample for sample in dataset]
        best_model_path = train.video_level_train(
            model=model.model,
            config_dict=config_dict,
            train_dataset=train_dataset,
            val_dataset=val_dataset)
    if config_dict['do_evaluate']:
        if config_dict['video_level_mode']:
            # Video-level evaluation over per-video feature datasets.
            test_dataset = dh.features_to_dataset(test_subjects, split='test')
            # Element 3 of each sample holds its path tensor.
            test_paths = [sample[3].numpy().tolist() for sample in test_dataset]
            test_steps = len(test_paths)
            _ = test_and_eval.evaluate_on_video_level(
                config_dict=config_dict,
                model=model,
                model_path=best_model_path,
                test_dataset=test_dataset,
                test_steps=test_steps)
        else:
            # Clip/sequence-level evaluation on the raw test dataset.
            test_and_eval.run_evaluation(
                config_dict=config_dict,
                model=model,
                model_path=best_model_path,
                test_dataset=test_dataset,
                test_steps=test_steps,
                y_batches=test_labels,
                y_paths=test_paths)
if __name__ == '__main__':
    # Parse command-line arguments. The local is named `parser` so it no
    # longer shadows the imported `arg_parser` module.
    parser = arg_parser.ArgParser(len(sys.argv))
    args = parser.parse()
    # Subject lists arrive as '/'-separated strings, e.g. 'sub1/sub2'.
    train_subjects = re.split('/', args.train_subjects)
    test_subjects = re.split('/', args.test_subjects)
    print('Subjects to train on: ', train_subjects)
    print('Subjects to test on: ', test_subjects)
    # The config file is a Python module exposing a `config_dict`.
    config_dict_module = helpers.load_module(args.config_file)
    config_dict = config_dict_module.config_dict
    if config_dict['val_mode'] == 'no_val':
        # Config sanity check. NOTE: assert is stripped under `python -O`;
        # kept as-is to preserve the original failure behavior.
        assert (config_dict['train_mode'] == 'low_level'), \
            'no_val requires low level train mode'
    config_dict['train_subjects'] = train_subjects
    config_dict['test_subjects'] = test_subjects
    if config_dict['val_mode'] == 'subject':
        # Only bound for subject-wise validation; run() reads this global
        # when training on video-level features.
        val_subjects = re.split('/', args.val_subjects)
    config_dict['job_identifier'] = args.job_identifier
    print('Job identifier: ', args.job_identifier)
    # Log the run configuration and the model/train sources to W&B.
    wandb.init(project='pfr', config=config_dict)
    wandb.save('models.py')
    wandb.save('train.py')
    all_subjects_df = pd.read_csv(args.subjects_overview)
    if args.test_run == 1:
        # NOTE(review): run() sets 'nb_epochs' in its own test-run branch;
        # the 'epochs' key written here looks unused — confirm which key
        # train.py actually reads before removing either.
        config_dict['epochs'] = 1
        config_dict['video_nb_epochs'] = 1
    # Run the whole program, from preparing the data to evaluating
    # the model's test performance
    run()