# config_default_example.yaml
# Preprocessing
clip_length: 16 # Number of frames within a clip
clip_offset: 0 # Frame offset between the beginning of the video and the first clip (applies to the 1st clip only)
clip_stride: 1 # Frame offset between successive clips, must be >= 1
crop_shape: [112,112] # (Height, Width) of the cropped frame
crop_type: Random # Type of cropping operation (Random, Central, or None)
final_shape: [112,112] # (Height, Width) of the input given to the CNN
num_clips: -1 # Number of clips to generate from a video (<0: uniform sampling, 0: divide entire video into clips, >0: defines the number of clips)
random_offset: 0 # Boolean switch to sample a single clip-length-sized clip from a random position in the video
resize_shape: [128,171] # (Height, Width) to which the original frames are resized
subtract_mean: '' # Subtract mean (R,G,B) from all frames during preprocessing
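#
# A worked example, illustrative only and not part of the config: with the
# values above, each frame is resized to 128x171 (Height x Width), a Random
# 112x112 crop is taken, and the CNN receives 112x112 inputs. Each clip is
# 16 consecutive frames (clip_length), the first clip starts at frame 0
# (clip_offset), successive clips start 1 frame apart (clip_stride), and
# num_clips: -1 selects clips by uniform sampling across the video.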
# Experiment Setup
acc_metric: Accuracy # Accuracy metric used for evaluation
batch_size: 15 # Number of videos in a mini-batch
dataset: HMDB51 # Name of dataset
debug: 0 # If True, do not plot, save, or create data files
epoch: 30 # Total number of epochs
exp: exp # Experiment name
gamma: 0.1 # Multiplier applied to the learning rate at each milestone
grad_max_norm: 0 # Maximum norm for gradient clipping
json_path: /z/dat/HMDB51/ # Path to the json file for the given dataset
labels: 51 # Total number of classes in the dataset
load_type: train # Dataset split(s) to load: training only, training and validation, or testing
loss_type: M_XENTROPY # Loss function
lr: 0.0001 # Learning rate
milestones: [10, 20] # Epochs at which the learning rate is multiplied by gamma
model: C3D # Name of model to be loaded
momentum: 0.9 # Momentum value in optimizer
num_workers: 2 # Number of CPU workers used to load data
opt: sgd # Name of optimizer
preprocess: default # String argument to select preprocessing type
pretrained: 1 # Load pretrained network
pseudo_batch_loop: 1 # Pseudo-batch size multiplier to mimic large minibatches
rerun: 1 # Number of times the experiment is repeated (trials)
save_dir: './results' # Path to results directory
seed: 999 # Seed for reproducibility
weight_decay: 0.0005 # Weight decay
resume: 0 # Boolean flag to resume training or switch to an alternate objective after loading
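#
# A worked example, illustrative only and assuming a standard multi-step
# schedule: the effective mini-batch size is batch_size * pseudo_batch_loop
# = 15 * 1 = 15 videos, and the learning rate starts at 0.0001, then is
# multiplied by gamma = 0.1 at epochs 10 and 20 (milestones), giving 0.00001
# for epochs 10-19 and 0.000001 for epochs 20-29 of the 30-epoch run.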