Skip to content

Commit

Permalink
chunk dataset working
Browse files Browse the repository at this point in the history
  • Loading branch information
MoustHolmes committed Jul 14, 2023
1 parent 2d287d6 commit 0dbe67c
Show file tree
Hide file tree
Showing 17 changed files with 12,744 additions and 194 deletions.
124 changes: 124 additions & 0 deletions .hydra/config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
# Top-level run flags (Hydra-resolved .hydra/config.yaml snapshot for this job).
task_name: train
tags:
- transformer
- optimizer_test
# Run both the fit and the test stage; torch.compile disabled.
train: true
test: true
compile: false
# No checkpoint to resume from; fixed seed for reproducibility.
ckpt_path: null
seed: 12345
# Datamodule config — Hydra instantiates the class named by _target_.
data:
_target_: src.data.datapipe_icecube_datamodule.IceCubeDatamodule
# SQLite database of IceCube Upgrade events and the pulsemap table read from it.
db_path: /groups/icecube/petersen/GraphNetDatabaseRepository/Upgrade_Data/sqlite3/dev_step4_upgrade_028_with_noise_dynedge_pulsemap_v3_merger_aftercrash.db
pulsemap: SplitInIcePulses_dynedge_v2_Pulses
# Event-number selection CSVs defining the train/test/validation splits.
train_csv_file: /groups/icecube/moust/work/IceCubeEncoderTransformer/selections/train_upgrade_pure_numu_selection_event_no.csv
test_csv_file: /groups/icecube/moust/work/IceCubeEncoderTransformer/selections/test_upgrade_pure_numu_selection_event_no.csv
val_csv_file: /groups/icecube/moust/work/IceCubeEncoderTransformer/selections/valid_upgrade_pure_numu_selection_event_no.csv
# Per-pulse input features fed to the encoder (8 columns).
input_cols:
- charge
- dom_time
- dom_x
- dom_y
- dom_z
- pmt_dir_x
- pmt_dir_y
- pmt_dir_z
# Regression target, read from the `truth` table.
target_cols:
- energy
truth_table: truth
# NOTE(review): presumably caps the total pulse/token count per batch — confirm in the datamodule.
max_token_count: 50000
num_workers: 4
# LightningModule config. optimizer and scheduler carry _partial_: true, so Hydra
# builds factories that the module completes later (e.g. with the model's parameters).
model:
_target_: src.models.simple_transformer_encoder_pooling_module.SimpleTransformerEncoderPoolingLitModule
optimizer:
_target_: torch.optim.Adam
_partial_: true
lr: 0.001
weight_decay: 0.0
# ReduceLROnPlateau: multiply LR by `factor` after `patience` epochs without improvement.
scheduler:
_target_: torch.optim.lr_scheduler.ReduceLROnPlateau
_partial_: true
mode: min
factor: 0.1
patience: 10
# Network hyperparameters: input_size 8 matches the 8 entries of data.input_cols;
# output_size 1 matches the single regression target (energy).
model:
_target_: src.models.components.simple_transformer_encoder_pooling.SimpleTransformerEncoderPooling
input_size: 8
d_model: 64
nhead: 2
dim_feedforward: 256
dropout: 0.1
num_layers: 2
output_size: 1
# Lightning callbacks, keyed by name; each is instantiated from its _target_.
callbacks:
# Keep the single best checkpoint by val/loss, plus the last epoch's checkpoint.
model_checkpoint:
_target_: lightning.pytorch.callbacks.ModelCheckpoint
dirpath: ${paths.output_dir}/checkpoints
filename: epoch_{epoch:03d}
monitor: val/loss
verbose: false
save_last: true
save_top_k: 1
mode: min
auto_insert_metric_name: false
save_weights_only: false
every_n_train_steps: null
train_time_interval: null
every_n_epochs: null
save_on_train_epoch_end: null
# Stop when val/loss has not improved for 100 checks — effectively disabled for
# a run capped at trainer.max_epochs: 3.
early_stopping:
_target_: lightning.pytorch.callbacks.EarlyStopping
monitor: val/loss
min_delta: 0.0
patience: 100
verbose: false
mode: min
strict: true
check_finite: true
stopping_threshold: null
divergence_threshold: null
check_on_train_epoch_end: null
# Rich console output: full-depth model summary and a progress bar.
model_summary:
_target_: lightning.pytorch.callbacks.RichModelSummary
max_depth: -1
rich_progress_bar:
_target_: lightning.pytorch.callbacks.RichProgressBar
# Weights & Biases logging (online mode; model artifacts not uploaded).
logger:
wandb:
_target_: lightning.pytorch.loggers.wandb.WandbLogger
save_dir: ${paths.output_dir}
offline: false
id: null
anonymous: null
# NOTE(review): project name is still the template default — likely meant to be renamed.
project: lightning-hydra-template
log_model: false
prefix: ''
entity: graphnet-team
group: transformer
# Reuses the top-level tags list (transformer, optimizer_test).
tags: ${tags}
job_type: ''
# Trainer: DDP across 2 GPUs on a single node. The limit_* settings cap each
# epoch at 20 train / 10 val / 10 test batches, i.e. a short smoke-scale run.
trainer:
_target_: lightning.pytorch.trainer.Trainer
default_root_dir: ${paths.output_dir}
min_epochs: 1
max_epochs: 3
accelerator: gpu
devices: 2
check_val_every_n_epoch: 1
deterministic: false
strategy: ddp
num_nodes: 1
sync_batchnorm: true
limit_train_batches: 20
limit_val_batches: 10
limit_test_batches: 10
# Filesystem layout: PROJECT_ROOT comes from the environment; output_dir and
# work_dir are resolved by Hydra at runtime.
paths:
root_dir: ${oc.env:PROJECT_ROOT}
data_dir: ${paths.root_dir}/data/
log_dir: ${paths.root_dir}/logs/
output_dir: ${hydra:runtime.output_dir}
work_dir: ${hydra:runtime.cwd}
# Misc toggles: warnings kept visible, tags required, resolved config printed at start.
extras:
ignore_warnings: false
enforce_tags: true
print_config: true
180 changes: 180 additions & 0 deletions .hydra/hydra.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
# Hydra's own runtime configuration as recorded for this run (.hydra/hydra.yaml).
hydra:
# Single-run output directory (fixed path rather than a timestamped dir).
run:
dir: /lustre/hpc/icecube/moust/work/IceCubeEncoderTransformer
# Multirun sweeps go under the log dir with a timestamped dir and per-job subdir.
sweep:
dir: ${paths.log_dir}/${task_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
subdir: ${hydra.job.num}
# Default sequential launcher and the basic grid sweeper (no plugin in use).
launcher:
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
sweeper:
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
max_batch_size: null
params: null
# Templates for `--help` output; $APP_CONFIG_GROUPS / $CONFIG are filled in by Hydra.
help:
app_name: ${hydra.job.name}
header: '${hydra.help.app_name} is powered by Hydra.
'
footer: 'Powered by Hydra (https://hydra.cc)
Use --hydra-help to view Hydra specific help
'
template: '${hydra.help.header}
== Configuration groups ==
Compose your configuration from those groups (group=option)
$APP_CONFIG_GROUPS
== Config ==
Override anything in the config (foo.bar=value)
$CONFIG
${hydra.help.footer}
'
# Template for `--hydra-help`; the nested `hydra_help: ???` is Hydra's
# mandatory-missing marker, filled at runtime.
hydra_help:
template: 'Hydra (${hydra.runtime.version})
See https://hydra.cc for more info.
== Flags ==
$FLAGS_HELP
== Configuration groups ==
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
to command line)
$HYDRA_CONFIG_GROUPS
Use ''--cfg hydra'' to Show the Hydra config.
'
hydra_help: ???
# Python logging dictConfig for Hydra's own messages: colorlog console output only.
hydra_logging:
version: 1
formatters:
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
root:
level: INFO
handlers:
- console
disable_existing_loggers: false
# Logging dictConfig for the job itself: colored console plus a plain-text
# per-job log file (${hydra.job.name}.log).
job_logging:
version: 1
formatters:
simple:
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- %(message)s'
log_colors:
DEBUG: purple
INFO: green
WARNING: yellow
ERROR: red
CRITICAL: red
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: simple
filename: ${hydra.job.name}.log
root:
level: INFO
handlers:
- console
- file
disable_existing_loggers: false
env: {}
mode: RUN
searchpath: []
callbacks: {}
output_subdir: .hydra
# Command-line overrides, split into Hydra's own overrides vs. task overrides.
# The job name train_ddp_process_1 indicates this snapshot was written by a
# spawned DDP worker process rather than the rank-0 launcher.
overrides:
hydra:
- hydra.run.dir="/lustre/hpc/icecube/moust/work/IceCubeEncoderTransformer"
- hydra.job.name=train_ddp_process_1
- hydra.mode=RUN
task:
- experiment=opt_test
job:
name: train_ddp_process_1
chdir: null
override_dirname: experiment=opt_test
# `???` entries are Hydra's mandatory-missing markers (unset for this run).
id: ???
num: ???
config_name: train.yaml
env_set: {}
env_copy: []
config:
override_dirname:
kv_sep: '='
item_sep: ','
exclude_keys: []
# Resolved runtime info: Hydra version and the config search path that was used.
runtime:
version: 1.3.2
version_base: '1.3'
cwd: /lustre/hpc/icecube/moust/work/IceCubeEncoderTransformer
config_sources:
- path: hydra.conf
schema: pkg
provider: hydra
- path: /lustre/hpc/icecube/moust/work/IceCubeEncoderTransformer/configs
schema: file
provider: main
- path: hydra_plugins.hydra_colorlog.conf
schema: pkg
provider: hydra-colorlog
- path: ''
schema: structured
provider: schema
output_dir: /lustre/hpc/icecube/moust/work/IceCubeEncoderTransformer
# Which option was selected from each config group for this run.
choices:
debug: null
local: default.yaml
hparams_search: null
experiment: opt_test
hydra: default.yaml
extras: default.yaml
paths: default.yaml
trainer: default.yaml
logger: wandb
callbacks: default.yaml
model: simple_transformer_encoder_pooling.yaml
data: upgrade_energy.yaml
hydra/env: default
hydra/callbacks: null
hydra/job_logging: colorlog
hydra/hydra_logging: colorlog
hydra/hydra_help: default
hydra/help: default
hydra/sweeper: basic
hydra/launcher: basic
hydra/output: default
verbose: false
1 change: 1 addition & 0 deletions .hydra/overrides.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Command-line overrides recorded for this run (.hydra/overrides.yaml):
# selects the opt_test experiment config.
- experiment=opt_test
Loading

0 comments on commit 0dbe67c

Please sign in to comment.