Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

utils restructured in subfolders and removed unused imports #275

Merged
merged 28 commits into from
Sep 27, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
6a5eb96
utils renamed and black formatting applied
allaffa Aug 19, 2024
c7fae52
bug fix for tests
allaffa Aug 19, 2024
366f6fe
black formatting fixed
allaffa Aug 19, 2024
9a6ce37
examples corrected
allaffa Aug 19, 2024
7084ee9
test_model_loadpred.py fixed
allaffa Aug 19, 2024
a8e0da1
black formatting fixed
allaffa Aug 19, 2024
526a884
test_loss_and_activation_functions.py fixed
allaffa Aug 19, 2024
11da4fb
black formatting fixed
allaffa Aug 20, 2024
0329a14
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
4b5b98e
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
d963e81
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
dfd3942
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
3d73791
reverting inadvertent automated refactoring of hydragnn into hhydragn…
allaffa Aug 23, 2024
708e393
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
2f819eb
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
c50f93e
reverting inadvertent automated refactoring of dataset folder into d…
allaffa Aug 23, 2024
2ec962f
git formatting fixed
allaffa Aug 23, 2024
bcd91ff
Adagrad converted to Adamax
allaffa Aug 24, 2024
a0c932a
Additional changes to fix bugs and suggestions from erdem
allaffa Sep 3, 2024
9322f7a
imports fixed for LennardJones example
allaffa Sep 20, 2024
f86560e
formatting fixed
allaffa Sep 20, 2024
66ad33e
imports in LJ_data.py fixed
allaffa Sep 20, 2024
0e5e71f
import of graph utils fixed in LJ_data.py
allaffa Sep 20, 2024
2b01472
import of setup.ddp() fixed in LennardJones
allaffa Sep 20, 2024
2bf8a85
setup_log call fixed
allaffa Sep 20, 2024
40738cd
get_summary_writer call fixed
allaffa Sep 20, 2024
b93798a
additional calls fixed
allaffa Sep 20, 2024
f0fa74c
black formatting fixed
allaffa Sep 20, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions examples/LennardJones/LJ_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,9 @@
mpi4py.rc.threads = False

# HydraGNN
from hydragnn.utils.abstractrawdataset import AbstractBaseDataset
from hydragnn.utils import nsplit
from hydragnn.preprocess.utils import get_radius_graph_pbc
from hydragnn.utils.datasets.abstractrawdataset import AbstractBaseDataset
from hydragnn.utils.distributed import nsplit
from hydragnn.preprocess.graph_samples_checks_and_updates import get_radius_graph_pbc

# Angstrom unit
primitive_bravais_lattice_constant_x = 3.8
Expand Down
8 changes: 4 additions & 4 deletions examples/LennardJones/LJ_inference_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,11 @@
import numpy as np

import hydragnn
from hydragnn.utils.time_utils import Timer
from hydragnn.utils.profiling_and_tracing.time_utils import Timer
from hydragnn.utils.distributed import get_device
from hydragnn.utils.model import load_existing_model
from hydragnn.utils.pickledataset import SimplePickleDataset
from hydragnn.utils.config_utils import (
from hydragnn.utils.datasets.pickledataset import SimplePickleDataset
from hydragnn.utils.input_config_parsing.config_utils import (
update_config,
)
from hydragnn.models.create import create_model_config
Expand All @@ -35,7 +35,7 @@
from scipy.interpolate import griddata

try:
from hydragnn.utils.adiosdataset import AdiosWriter, AdiosDataset
from hydragnn.utils.datasets.adiosdataset import AdiosWriter, AdiosDataset
except ImportError:
pass

Expand Down
37 changes: 21 additions & 16 deletions examples/LennardJones/LennardJones.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,16 +30,19 @@

# HydraGNN
import hydragnn
from hydragnn.utils.print_utils import log
from hydragnn.utils.time_utils import Timer
import hydragnn.utils.tracer as tr
from hydragnn.utils.print.print_utils import log
from hydragnn.utils.profiling_and_tracing.time_utils import Timer
import hydragnn.utils.profiling_and_tracing.tracer as tr
from hydragnn.preprocess.load_data import split_dataset
from hydragnn.utils.distdataset import DistDataset
from hydragnn.utils.pickledataset import SimplePickleWriter, SimplePickleDataset
from hydragnn.preprocess.utils import gather_deg
from hydragnn.utils.datasets.distdataset import DistDataset
from hydragnn.utils.datasets.pickledataset import (
SimplePickleWriter,
SimplePickleDataset,
)
from hydragnn.preprocess.graph_samples_checks_and_updates import gather_deg

try:
from hydragnn.utils.adiosdataset import AdiosWriter, AdiosDataset
from hydragnn.utils.datasets.adiosdataset import AdiosWriter, AdiosDataset
except ImportError:
pass

Expand Down Expand Up @@ -117,7 +120,7 @@

##################################################################################################################
# Always initialize for multi-rank training.
comm_size, rank = hydragnn.utils.setup_ddp()
comm_size, rank = hydragnn.utils.distributed.setup_ddp()
##################################################################################################################

comm = MPI.COMM_WORLD
Expand All @@ -130,8 +133,8 @@
)

log_name = "LJ" if args.log is None else args.log
hydragnn.utils.setup_log(log_name)
writer = hydragnn.utils.get_summary_writer(log_name)
hydragnn.utils.print.setup_log(log_name)
writer = hydragnn.utils.model.get_summary_writer(log_name)

log("Command: {0}\n".format(" ".join([x for x in sys.argv])), rank=0)

Expand Down Expand Up @@ -266,27 +269,29 @@
trainset, valset, testset, config["NeuralNetwork"]["Training"]["batch_size"]
)

config = hydragnn.utils.update_config(config, train_loader, val_loader, test_loader)
config = hydragnn.utils.input_config_parsing.update_config(
config, train_loader, val_loader, test_loader
)
## Good to sync with everyone right after DDStore setup
comm.Barrier()

hydragnn.utils.save_config(config, log_name)
hydragnn.utils.input_config_parsing.save_config(config, log_name)

timer.stop()

model = hydragnn.models.create_model_config(
config=config["NeuralNetwork"],
verbosity=verbosity,
)
model = hydragnn.utils.get_distributed_model(model, verbosity)
model = hydragnn.utils.distributed.get_distributed_model(model, verbosity)

learning_rate = config["NeuralNetwork"]["Training"]["Optimizer"]["learning_rate"]
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001
)

hydragnn.utils.load_existing_model_config(
hydragnn.utils.model.load_existing_model_config(
model, config["NeuralNetwork"]["Training"], optimizer=optimizer
)

Expand All @@ -307,8 +312,8 @@
compute_grad_energy=True,
)

hydragnn.utils.save_model(model, optimizer, log_name)
hydragnn.utils.print_timers(verbosity)
hydragnn.utils.model.save_model(model, optimizer, log_name)
hydragnn.utils.profiling_and_tracing.print_timers(verbosity)

if tr.has("GPTLTracer"):
import gptl4py as gp
Expand Down
44 changes: 26 additions & 18 deletions examples/alexandria/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,23 @@
from torch_geometric.transforms import Distance, Spherical, LocalCartesian

import hydragnn
from hydragnn.utils.time_utils import Timer
from hydragnn.utils.profiling_and_tracing.time_utils import Timer
from hydragnn.utils.model import print_model
from hydragnn.utils.abstractbasedataset import AbstractBaseDataset
from hydragnn.utils.distdataset import DistDataset
from hydragnn.utils.pickledataset import SimplePickleWriter, SimplePickleDataset
from hydragnn.preprocess.utils import gather_deg
from hydragnn.preprocess.utils import RadiusGraph, RadiusGraphPBC
from hydragnn.utils.datasets.abstractbasedataset import AbstractBaseDataset
from hydragnn.utils.datasets.distdataset import DistDataset
from hydragnn.utils.datasets.pickledataset import (
SimplePickleWriter,
SimplePickleDataset,
)
from hydragnn.preprocess.graph_samples_checks_and_updates import gather_deg
from hydragnn.preprocess.graph_samples_checks_and_updates import (
RadiusGraph,
RadiusGraphPBC,
)
from hydragnn.preprocess.load_data import split_dataset

import hydragnn.utils.tracer as tr
from hydragnn.utils.print_utils import iterate_tqdm, log
import hydragnn.utils.profiling_and_tracing.tracer as tr
from hydragnn.utils.print.print_utils import iterate_tqdm, log

from generate_dictionaries_pure_elements import (
generate_dictionary_bulk_energies,
Expand All @@ -38,7 +44,7 @@
pass

import subprocess
from hydragnn.utils import nsplit
from hydragnn.utils.distributed import nsplit


def info(*args, logtype="info", sep=" "):
Expand Down Expand Up @@ -360,7 +366,7 @@ def get(self, idx):

##################################################################################################################
# Always initialize for multi-rank training.
comm_size, rank = hydragnn.utils.setup_ddp()
comm_size, rank = hydragnn.utils.distributed.setup_ddp()
##################################################################################################################

comm = MPI.COMM_WORLD
Expand All @@ -373,8 +379,8 @@ def get(self, idx):
)

log_name = "Alexandria" if args.log is None else args.log
hydragnn.utils.setup_log(log_name)
writer = hydragnn.utils.get_summary_writer(log_name)
hydragnn.utils.print.setup_log(log_name)
writer = hydragnn.utils.model.get_summary_writer(log_name)

log("Command: {0}\n".format(" ".join([x for x in sys.argv])), rank=0)

Expand Down Expand Up @@ -507,19 +513,21 @@ def get(self, idx):
trainset, valset, testset, config["NeuralNetwork"]["Training"]["batch_size"]
)

config = hydragnn.utils.update_config(config, train_loader, val_loader, test_loader)
config = hydragnn.utils.input_config_parsing.update_config(
config, train_loader, val_loader, test_loader
)
## Good to sync with everyone right after DDStore setup
comm.Barrier()

hydragnn.utils.save_config(config, log_name)
hydragnn.utils.input_config_parsing.save_config(config, log_name)

timer.stop()

model = hydragnn.models.create_model_config(
config=config["NeuralNetwork"],
verbosity=verbosity,
)
model = hydragnn.utils.get_distributed_model(model, verbosity)
model = hydragnn.utils.distributed.get_distributed_model(model, verbosity)

# Print details of neural network architecture
print_model(model)
Expand All @@ -530,7 +538,7 @@ def get(self, idx):
optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001
)

hydragnn.utils.load_existing_model_config(
hydragnn.utils.model.load_existing_model_config(
model, config["NeuralNetwork"]["Training"], optimizer=optimizer
)

Expand All @@ -550,8 +558,8 @@ def get(self, idx):
create_plots=False,
)

hydragnn.utils.save_model(model, optimizer, log_name)
hydragnn.utils.print_timers(verbosity)
hydragnn.utils.model.save_model(model, optimizer, log_name)
hydragnn.utils.profiling_and_tracing.print_timers(verbosity)

if tr.has("GPTLTracer"):
import gptl4py as gp
Expand Down
48 changes: 27 additions & 21 deletions examples/ani1_x/train.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import os, re, json
import os, json
import logging
import sys
from mpi4py import MPI
Expand All @@ -7,7 +7,6 @@
import numpy as np

import random

import torch

# FIX random seed
Expand All @@ -18,26 +17,31 @@
from torch_geometric.transforms import Distance, Spherical, LocalCartesian

import hydragnn
from hydragnn.utils.time_utils import Timer
from hydragnn.utils.profiling_and_tracing.time_utils import Timer
from hydragnn.utils.model import print_model
from hydragnn.utils.abstractbasedataset import AbstractBaseDataset
from hydragnn.utils.distdataset import DistDataset
from hydragnn.utils.pickledataset import SimplePickleWriter, SimplePickleDataset
from hydragnn.preprocess.utils import gather_deg
from hydragnn.preprocess.utils import RadiusGraph, RadiusGraphPBC
from hydragnn.utils.datasets.abstractbasedataset import AbstractBaseDataset
from hydragnn.utils.datasets.distdataset import DistDataset
from hydragnn.utils.datasets.pickledataset import (
SimplePickleWriter,
SimplePickleDataset,
)
from hydragnn.preprocess.graph_samples_checks_and_updates import gather_deg
from hydragnn.preprocess.graph_samples_checks_and_updates import (
RadiusGraph,
RadiusGraphPBC,
)
from hydragnn.preprocess.load_data import split_dataset

import hydragnn.utils.tracer as tr
import hydragnn.utils.profiling_and_tracing.tracer as tr

from hydragnn.utils.print_utils import iterate_tqdm, log
from hydragnn.utils.print.print_utils import log

try:
from hydragnn.utils.adiosdataset import AdiosWriter, AdiosDataset
except ImportError:
pass

import subprocess
from hydragnn.utils import nsplit
from hydragnn.utils.distributed import nsplit

import h5py

Expand Down Expand Up @@ -242,7 +246,7 @@ def get(self, idx):

##################################################################################################################
# Always initialize for multi-rank training.
comm_size, rank = hydragnn.utils.setup_ddp()
comm_size, rank = hydragnn.utils.distributed.setup_ddp()
##################################################################################################################

comm = MPI.COMM_WORLD
Expand All @@ -255,8 +259,8 @@ def get(self, idx):
)

log_name = "ANI1x" if args.log is None else args.log
hydragnn.utils.setup_log(log_name)
writer = hydragnn.utils.get_summary_writer(log_name)
hydragnn.utils.print.print_utils.setup_log(log_name)
writer = hydragnn.utils.model.get_summary_writer(log_name)

log("Command: {0}\n".format(" ".join([x for x in sys.argv])), rank=0)

Expand Down Expand Up @@ -389,19 +393,21 @@ def get(self, idx):
trainset, valset, testset, config["NeuralNetwork"]["Training"]["batch_size"]
)

config = hydragnn.utils.update_config(config, train_loader, val_loader, test_loader)
config = hydragnn.utils.input_config_parsing.update_config(
config, train_loader, val_loader, test_loader
)
## Good to sync with everyone right after DDStore setup
comm.Barrier()

hydragnn.utils.save_config(config, log_name)
hydragnn.utils.input_config_parsing.save_config(config, log_name)

timer.stop()

model = hydragnn.models.create_model_config(
config=config["NeuralNetwork"],
verbosity=verbosity,
)
model = hydragnn.utils.get_distributed_model(model, verbosity)
model = hydragnn.utils.distributed.get_distributed_model(model, verbosity)

# Print details of neural network architecture
print_model(model)
Expand All @@ -412,7 +418,7 @@ def get(self, idx):
optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001
)

hydragnn.utils.load_existing_model_config(
hydragnn.utils.model.load_existing_model_config(
model, config["NeuralNetwork"]["Training"], optimizer=optimizer
)

Expand All @@ -432,8 +438,8 @@ def get(self, idx):
create_plots=False,
)

hydragnn.utils.save_model(model, optimizer, log_name)
hydragnn.utils.print_timers(verbosity)
hydragnn.utils.model.save_model(model, optimizer, log_name)
hydragnn.utils.profiling_and_tracing.print_timers(verbosity)

if tr.has("GPTLTracer"):
import gptl4py as gp
Expand Down
Loading
Loading