diff --git a/examples/operator/advection_aligned_pideeponet.py b/examples/operator/advection_aligned_pideeponet.py
index 9eaedffff..c0ab0e2f6 100644
--- a/examples/operator/advection_aligned_pideeponet.py
+++ b/examples/operator/advection_aligned_pideeponet.py
@@ -74,7 +74,7 @@ def periodic(x):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=50000)
+losshistory, train_state = model.train(iterations=50000)
 dde.utils.plot_loss_history(losshistory)
 
 x = np.linspace(0, 1, num=100)
diff --git a/examples/operator/advection_aligned_pideeponet_2d.py b/examples/operator/advection_aligned_pideeponet_2d.py
index 02f7226a0..bb5115402 100644
--- a/examples/operator/advection_aligned_pideeponet_2d.py
+++ b/examples/operator/advection_aligned_pideeponet_2d.py
@@ -79,7 +79,7 @@ def periodic(x):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=30000)
+losshistory, train_state = model.train(iterations=30000)
 dde.utils.plot_loss_history(losshistory)
 
 x = np.linspace(0, 1, num=100)
diff --git a/examples/operator/advection_unaligned_pideeponet.py b/examples/operator/advection_unaligned_pideeponet.py
index 0db63fa06..e236ad92c 100644
--- a/examples/operator/advection_unaligned_pideeponet.py
+++ b/examples/operator/advection_unaligned_pideeponet.py
@@ -74,7 +74,7 @@ def periodic(x):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=50000)
+losshistory, train_state = model.train(iterations=50000)
 dde.utils.plot_loss_history(losshistory)
 
 x = np.linspace(0, 1, num=100)
diff --git a/examples/operator/advection_unaligned_pideeponet_2d.py b/examples/operator/advection_unaligned_pideeponet_2d.py
index e4bf7ddbb..ce4c66a42 100644
--- a/examples/operator/advection_unaligned_pideeponet_2d.py
+++ b/examples/operator/advection_unaligned_pideeponet_2d.py
@@ -77,7 +77,7 @@ def periodic(x):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=10000)
+losshistory, train_state = model.train(iterations=10000)
 dde.utils.plot_loss_history(losshistory)
 
 x = np.linspace(0, 1, num=100)
diff --git a/examples/operator/antiderivative_aligned_pideeponet.py b/examples/operator/antiderivative_aligned_pideeponet.py
index c2fa243c0..006c23286 100644
--- a/examples/operator/antiderivative_aligned_pideeponet.py
+++ b/examples/operator/antiderivative_aligned_pideeponet.py
@@ -57,7 +57,7 @@ def zero_ic(inputs, outputs):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=40000)
+losshistory, train_state = model.train(iterations=40000)
 dde.utils.plot_loss_history(losshistory)
 
 
diff --git a/examples/operator/antiderivative_unaligned_pideeponet.py b/examples/operator/antiderivative_unaligned_pideeponet.py
index 4ea3ee22c..0e4365ad9 100644
--- a/examples/operator/antiderivative_unaligned_pideeponet.py
+++ b/examples/operator/antiderivative_unaligned_pideeponet.py
@@ -40,7 +40,7 @@ def zero_ic(inputs, outputs):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=40000)
+losshistory, train_state = model.train(iterations=40000)
 dde.utils.plot_loss_history(losshistory)
 
 
diff --git a/examples/operator/diff_rec_aligned_pideeponet.py b/examples/operator/diff_rec_aligned_pideeponet.py
index 2b594ca76..c2f0d90cd 100644
--- a/examples/operator/diff_rec_aligned_pideeponet.py
+++ b/examples/operator/diff_rec_aligned_pideeponet.py
@@ -51,7 +51,7 @@ def pde(x, y, v):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=20000)
+losshistory, train_state = model.train(iterations=20000)
 dde.utils.plot_loss_history(losshistory)
 
 func_feats = func_space.random(1)
diff --git a/examples/operator/diff_rec_unaligned_pideeponet.py b/examples/operator/diff_rec_unaligned_pideeponet.py
index 83140ec26..9530915a8 100644
--- a/examples/operator/diff_rec_unaligned_pideeponet.py
+++ b/examples/operator/diff_rec_unaligned_pideeponet.py
@@ -51,7 +51,7 @@ def pde(x, y, v):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0005)
-losshistory, train_state = model.train(epochs=50000)
+losshistory, train_state = model.train(iterations=50000)
 dde.utils.plot_loss_history(losshistory)
 
 func_feats = func_space.random(1)
diff --git a/examples/pinn_forward/Allen_Cahn.py b/examples/pinn_forward/Allen_Cahn.py
index 7cdafaacc..e5c82591e 100644
--- a/examples/pinn_forward/Allen_Cahn.py
+++ b/examples/pinn_forward/Allen_Cahn.py
@@ -55,7 +55,7 @@ def output_transform(x, y):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=1e-3)
-model.train(epochs=40000)
+model.train(iterations=40000)
 model.compile("L-BFGS")
 losshistory, train_state = model.train()
 dde.saveplot(losshistory, train_state, issave=True, isplot=True)
diff --git a/examples/pinn_forward/Beltrami_flow.py b/examples/pinn_forward/Beltrami_flow.py
index 3779692b9..cacd4a901 100644
--- a/examples/pinn_forward/Beltrami_flow.py
+++ b/examples/pinn_forward/Beltrami_flow.py
@@ -166,7 +166,7 @@ def p_func(x):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=1e-3, loss_weights=[1, 1, 1, 1, 100, 100, 100, 100, 100, 100])
-model.train(epochs=30000)
+model.train(iterations=30000)
 model.compile("L-BFGS", loss_weights=[1, 1, 1, 1, 100, 100, 100, 100, 100, 100])
 losshistory, train_state = model.train()
 
diff --git a/examples/pinn_forward/Burgers_RAR.py b/examples/pinn_forward/Burgers_RAR.py
index 6372d1c1d..c3caabd87 100644
--- a/examples/pinn_forward/Burgers_RAR.py
+++ b/examples/pinn_forward/Burgers_RAR.py
@@ -52,7 +52,7 @@ def pde(x, y):
     data.add_anchors(X[x_id])
     early_stopping = dde.callbacks.EarlyStopping(min_delta=1e-4, patience=2000)
     model.compile("adam", lr=1e-3)
-    model.train(epochs=10000, disregard_previous_best=True, callbacks=[early_stopping])
+    model.train(iterations=10000, disregard_previous_best=True, callbacks=[early_stopping])
     model.compile("L-BFGS")
     losshistory, train_state = model.train()
 dde.saveplot(losshistory, train_state, issave=True, isplot=True)
diff --git a/examples/pinn_forward/Helmholtz_Dirichlet_2d_HPO.py b/examples/pinn_forward/Helmholtz_Dirichlet_2d_HPO.py
index b9c029f36..940e6a6e6 100644
--- a/examples/pinn_forward/Helmholtz_Dirichlet_2d_HPO.py
+++ b/examples/pinn_forward/Helmholtz_Dirichlet_2d_HPO.py
@@ -29,7 +29,7 @@
 k0 = 2 * np.pi * n
 precision_train = 10
 precision_test = 30
-epochs = 10000
+iterations = 10000
 
 
 def pde(x, y):
@@ -89,7 +89,7 @@ def create_model(config):
 
 
 def train_model(model, config):
-    losshistory, train_state = model.train(epochs=epochs)
+    losshistory, train_state = model.train(iterations=iterations)
     train = np.array(losshistory.loss_train).sum(axis=1).ravel()
     test = np.array(losshistory.loss_test).sum(axis=1).ravel()
     metric = np.array(losshistory.metrics_test).sum(axis=1).ravel()
diff --git a/examples/pinn_forward/Helmholtz_Sound_hard_ABC_2d.py b/examples/pinn_forward/Helmholtz_Sound_hard_ABC_2d.py
index 5d251cf4f..31e191b7f 100644
--- a/examples/pinn_forward/Helmholtz_Sound_hard_ABC_2d.py
+++ b/examples/pinn_forward/Helmholtz_Sound_hard_ABC_2d.py
@@ -6,7 +6,7 @@
 
 # General parameters
 weights = 1
-epochs = 10000
+iterations = 10000
 learning_rate = 1e-3
 num_dense_layers = 3
 num_dense_nodes = 350
@@ -133,6 +133,6 @@ def func1_outer(x, y):
 model.compile(
     "adam", lr=learning_rate, loss_weights=loss_weights, metrics=["l2 relative error"]
 )
-losshistory, train_state = model.train(epochs=epochs)
+losshistory, train_state = model.train(iterations=iterations)
 
 dde.saveplot(losshistory, train_state, issave=True, isplot=True)
diff --git a/examples/pinn_forward/heat_resample.py b/examples/pinn_forward/heat_resample.py
index 057257e93..464a97f28 100755
--- a/examples/pinn_forward/heat_resample.py
+++ b/examples/pinn_forward/heat_resample.py
@@ -97,7 +97,7 @@ def pde(x, y):
 
 # Build and train the model:
 model.compile("adam", lr=1e-3)
-model.train(epochs=200000, callbacks=[pde_resampler])
+model.train(iterations=200000, callbacks=[pde_resampler])
 model.compile("L-BFGS")
 losshistory, train_state = model.train(callbacks=[pde_resampler])
 
diff --git a/examples/pinn_inverse/Navier_Stokes_inverse.py b/examples/pinn_inverse/Navier_Stokes_inverse.py
index 46bfbbd3f..d0f8d0a14 100644
--- a/examples/pinn_inverse/Navier_Stokes_inverse.py
+++ b/examples/pinn_inverse/Navier_Stokes_inverse.py
@@ -127,7 +127,7 @@ def Navier_Stokes_Equation(x, y):
 dde.saveplot(loss_history, train_state, issave=True, isplot=True)
 model.compile("adam", lr=1e-4, external_trainable_variables=[C1, C2])
 loss_history, train_state = model.train(
-    epochs=10000, callbacks=[variable], display_every=1000, disregard_previous_best=True
+    iterations=10000, callbacks=[variable], display_every=1000, disregard_previous_best=True
 )
 dde.saveplot(loss_history, train_state, issave=True, isplot=True)
 # model.save(save_path = "./NS_inverse_model/model")
diff --git a/examples/pinn_inverse/elliptic_inverse_field_batch.py b/examples/pinn_inverse/elliptic_inverse_field_batch.py
index ef9b4a25e..03069a9d2 100644
--- a/examples/pinn_inverse/elliptic_inverse_field_batch.py
+++ b/examples/pinn_inverse/elliptic_inverse_field_batch.py
@@ -1,4 +1,4 @@
-"""Backend supported: tensorflow.compat.v1, pytorch, paddle"""
+"""Backend supported: pytorch, paddle"""
 import deepxde as dde
 import matplotlib.pyplot as plt
 import numpy as np
@@ -42,7 +42,7 @@ def sol(x):
 
 model = dde.Model(data, net)
 model.compile("adam", lr=0.0001, loss_weights=[1, 100, 1000])
-losshistory, train_state = model.train(epochs=20000, callbacks=[pde_resampler])
+losshistory, train_state = model.train(iterations=20000, callbacks=[pde_resampler])
 dde.saveplot(losshistory, train_state, issave=True, isplot=True)
 
 # view results
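
Note (not part of the patch): every hunk above makes the same mechanical change, renaming the train() keyword from epochs to iterations, except for the one docstring update in elliptic_inverse_field_batch.py. The sketch below is a toy 1D Poisson problem invented here only to show the renamed keyword in a complete, self-contained script; it is not taken from any file in this patch and assumes a DeepXDE version whose Model.train accepts iterations, which is exactly what these examples now target.

    # Minimal illustrative sketch: solve -u'' = 2 on (-1, 1) with u(-1) = u(1) = 0.
    # The exact solution is u(x) = 1 - x**2.
    import deepxde as dde


    def pde(x, y):
        # PDE residual: -u'' - 2 should vanish for the true solution.
        dy_xx = dde.grad.hessian(y, x)
        return -dy_xx - 2


    geom = dde.geometry.Interval(-1, 1)
    bc = dde.icbc.DirichletBC(geom, lambda x: 0, lambda x, on_boundary: on_boundary)
    data = dde.data.PDE(
        geom, pde, bc, num_domain=32, num_boundary=2,
        solution=lambda x: 1 - x**2, num_test=100,
    )
    net = dde.nn.FNN([1] + [20] * 3 + [1], "tanh", "Glorot uniform")

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3, metrics=["l2 relative error"])
    # The keyword is iterations; epochs is the old spelling this patch replaces.
    losshistory, train_state = model.train(iterations=5000)
    dde.utils.plot_loss_history(losshistory)

The call pattern in the last three lines is the one each converted example now uses; no other training behavior changes.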