diff --git a/grama/comp_building.py b/grama/comp_building.py
index c01f509..8ad4aaf 100644
--- a/grama/comp_building.py
+++ b/grama/comp_building.py
@@ -203,7 +203,7 @@ def comp_function(model, fun=None, var=None, out=None, name=None, runtime=0):
 
     Args:
         model (gr.Model): Model to compose
-        fun (function): Function taking R^d -> R^r
+        fun (function): Function taking at least one real input and returning at least one real output (fun: R^d -> R^r). Each input to `fun` must be a scalar (see examples below).
         var (list(string)): List of variable names or number of inputs
         out (list(string)): List of output names or number of outputs
         runtime (numeric): Estimated single-eval runtime (in seconds)
@@ -228,6 +228,26 @@ def comp_function(model, fun=None, var=None, out=None, name=None, runtime=0):
             )
         )
 
+        ## Providing a function with multiple inputs
+        md2 = (
+            gr.Model("test 2")
+            >> gr.cp_function(
+                fun=lambda x, y: x + y,
+                var=["x", "y"],
+                out=["f"],
+            )
+        )
+
+        ## Providing a function with multiple inputs and multiple outputs
+        md3 = (
+            gr.Model("test 3")
+            >> gr.cp_function(
+                fun=lambda x, y: [x + y, x - y],
+                var=["x", "y"],
+                out=["f", "g"],
+            )
+        )
+
     """
 
     model_new = model.copy()
diff --git a/grama/core.py b/grama/core.py
index 313aeee..58c7aa1 100644
--- a/grama/core.py
+++ b/grama/core.py
@@ -106,8 +106,9 @@ def eval(self, df):
         ## Set up output
         n_rows = df.shape[0]
         results = zeros((n_rows, len(self.out)))
+
         for ind in range(n_rows):
-            results[ind] = self.func(df.loc[ind, self.var])
+            results[ind] = self.func(*df.loc[ind, self.var])
 
         ## Package output as DataFrame
         return DataFrame(data=results, columns=self.out)
diff --git a/grama/eval_defaults.py b/grama/eval_defaults.py
index 63cc2da..7b22229 100644
--- a/grama/eval_defaults.py
+++ b/grama/eval_defaults.py
@@ -684,6 +684,7 @@ def eval_sample(model, n=None, df_det=None, seed=None, append=True, skip=False,
         return df_samp
 
+
     df_res = eval_df(model, df=df_samp, append=append)
 
     ## Attach metadata
     with catch_warnings():
diff --git a/grama/fit/fit_scikitlearn.py b/grama/fit/fit_scikitlearn.py
index 1d3ee64..926a56b 100644
--- a/grama/fit/fit_scikitlearn.py
+++ b/grama/fit/fit_scikitlearn.py
@@ -121,6 +121,7 @@ def __init__(self, regressor, var, out, name, runtime):
 
     def eval(self, df):
         ## Check invariant; model inputs must be subset of df columns
+
         if not set(self.var).issubset(set(df.columns)):
             raise ValueError(
                 "Model function `{}` var not a subset of given columns".format(
diff --git a/grama/models/circuit_RLC.py b/grama/models/circuit_RLC.py
index 0264201..9bf6c9c 100644
--- a/grama/models/circuit_RLC.py
+++ b/grama/models/circuit_RLC.py
@@ -21,7 +21,7 @@ def make_prlc():
             var=["L", "C"],
             out=["omega0"],
         )
-        >> cp_function(
+        >> cp_vec_function(
            fun=lambda df: df_make(Q=df.omega0 * df.R * df.C),
            name="parallel RLC",
            var=["omega0", "R", "C"],
diff --git a/grama/models/ishigami.py b/grama/models/ishigami.py
index e7e1f42..e7e7c64 100644
--- a/grama/models/ishigami.py
+++ b/grama/models/ishigami.py
@@ -5,8 +5,7 @@
 
 from numpy import sin, pi
 
-def fun(x):
-    a, b, x1, x2, x3 = x
+def fun(a, b, x1, x2, x3):
     return sin(x1) + a * sin(x2)**2 + b * x3**4 * sin(x1)
 
 def make_ishigami():
diff --git a/grama/models/linear_normal.py b/grama/models/linear_normal.py
index ab60327..0e2e294 100644
--- a/grama/models/linear_normal.py
+++ b/grama/models/linear_normal.py
@@ -3,9 +3,7 @@
 
 from grama import cp_copula_independence, cp_function, cp_marginals, Model
 
-def limit_state(x):
-    x1, x2 = x
-
+def limit_state(x1, x2):
     return 1 - x1 - x2
 
 def make_linear_normal():
diff --git a/grama/models/pipe_flow.py b/grama/models/pipe_flow.py
index eebc458..7d04a5f 100644
--- a/grama/models/pipe_flow.py
+++ b/grama/models/pipe_flow.py
@@ -5,29 +5,29 @@
 
 from math import sqrt, log10, pow, log
 from scipy.optimize import bisect
 
-def re_fcn(q):
+def re_fcn(rho, u, d, mu, eps):
     # {rho,u,d,mu,eps}
-    return q[0]*q[1]*q[2]/q[3]
+    return rho*u*d/mu
 
-def f_lam(q):
+def f_lam(rho, u, d, mu, eps):
     # {rho,u,d,mu,eps}
-    return 64. / re_fcn(q)
+    return 64. / re_fcn(rho, u, d, mu, eps)
 
-def colebrook(q,f):
+def colebrook(rho, u, d, mu, eps, f):
     # {rho,u,d,mu,eps}
-    fs = sqrt(f); Re = re_fcn(q)
-    return 1 + 2.*fs*log10(q[4]/3.6/q[2] + 2.51/Re/fs)
+    fs = sqrt(f); Re = re_fcn(rho, u, d, mu, eps)
+    return 1 + 2.*fs*log10(eps/3.6/d + 2.51/Re/fs)
 
-def f_tur(q):
-    return bisect(lambda f: colebrook(q,f), 1e-5, 10)
+def f_tur(rho, u, d, mu, eps):
+    return bisect(lambda f: colebrook(rho, u, d, mu, eps, f), 1e-5, 10)
 
 Re_c = 3e3
 
-def fcn_pipe(q):
-    Re = re_fcn(q)
+def fcn_pipe(rho, u, d, mu, eps):
+    Re = re_fcn(rho, u, d, mu, eps)
     if Re < Re_c:
-        return f_lam(q)
+        return f_lam(rho, u, d, mu, eps)
     else:
-        return f_tur(q)
+        return f_tur(rho, u, d, mu, eps)
 
 def make_pipe_friction():
     r"""Pipe Friction Factor
diff --git a/grama/models/plane_laminate.py b/grama/models/plane_laminate.py
index 22c0516..abc6fda 100644
--- a/grama/models/plane_laminate.py
+++ b/grama/models/plane_laminate.py
@@ -413,7 +413,7 @@ def mapSign(x):
             name=name,
             functions=[
                 Function(
-                    lambda X: uniaxial_stress_limit(X),
+                    lambda *X: uniaxial_stress_limit(X),
                     make_names(Theta_nom),
                     list(itertools.chain.from_iterable([
                         ["g_11_tension_{}".format(i),
diff --git a/grama/models/test.py b/grama/models/test.py
index 59a9c70..db3626e 100644
--- a/grama/models/test.py
+++ b/grama/models/test.py
@@ -9,8 +9,7 @@
 
 df_test_input = DataFrame(data={"x0": [0], "x1": [0], "x2": [0]})
 
 ## Define a test model
-def fun(x):
-    x1, x2, x3 = x
+def fun(x1, x2, x3):
     return x1 + x2 + x3
 
 def make_test():
diff --git a/tests/test_core.py b/tests/test_core.py
index d82ef39..0ef746e 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -34,7 +34,7 @@ def setUp(self):
         self.model_2d = gr.Model(
             functions=[
                 gr.Function(
-                    lambda x: [x[0], x[1]], ["x0", "x1"], ["y0", "y1"], "test", 0
+                    lambda x0, x1: [x0, x1], ["x0", "x1"], ["y0", "y1"], "test", 0
                 ),
             ],
             domain=domain_2d,
@@ -49,7 +49,7 @@ def setUp(self):
         self.model_3d = gr.Model(
             functions=[
                 gr.Function(
-                    lambda x: x[0] + x[1] + x[2], ["x", "y", "z"], ["f"], "test", 0
+                    lambda x, y, z: x + y + z, ["x", "y", "z"], ["f"], "test", 0
                 )
             ],
             density=gr.Density(marginals=marginals),
@@ -111,7 +111,7 @@ def test_var_outer(self):
 
     def test_drop_out(self):
         """Checks that output column names are properly dropped"""
-        md = gr.Model() >> gr.cp_function(lambda x: x[0] + 1, var=1, out=1)
+        md = gr.Model() >> gr.cp_function(lambda x0: x0 + 1, var=1, out=1)
 
         df_in = gr.df_make(x0=[0, 1, 2], y0=[0, 1, 2])
         df_true = gr.df_make(x0=[0, 1, 2], y0=[1, 2, 3])
@@ -194,7 +194,7 @@ def test_dag(self):
         md = (
             gr.Model("model")
             >> gr.cp_function(lambda x: x, var=1, out=1)
-            >> gr.cp_function(lambda x: x[0] + x[1], var=["x0", "y0"], out=1)
+            >> gr.cp_function(lambda x0, y0: x0 + y0, var=["x0", "y0"], out=1)
         )
 
         G_true = nx.DiGraph()
diff --git a/tests/test_evals.py b/tests/test_evals.py
index 762e8bc..ed7e97f 100644
--- a/tests/test_evals.py
+++ b/tests/test_evals.py
@@ -161,7 +161,7 @@ def setUp(self):
 
         self.model_2d = gr.Model(
             functions=[
-                gr.Function(lambda x: [x[0], x[1]], ["x", "y"], ["f", "g"], "test", 0)
+                gr.Function(lambda x, y: [x, y], ["x", "y"], ["f", "g"], "test", 0)
             ],
             domain=domain_2d,
             density=gr.Density(
@@ -310,7 +310,7 @@ def test_grad_fd(self):
         ## Flags
         md_test = (
             gr.Model()
-            >> gr.cp_function(fun=lambda x: x[0] + x[1] ** 2, var=2, out=1)
+            >> gr.cp_function(fun=lambda x0, x1: x0 + x1 ** 2, var=2, out=1)
             >> gr.cp_marginals(x0={"dist": "norm", "loc": 0, "scale": 1})
         )
         df_base = pd.DataFrame(dict(x0=[0, 1], x1=[0, 1]))
@@ -368,7 +368,7 @@ def setUp(self):
 
         self.md_2d = (
             gr.Model()
-            >> gr.cp_function(fun=lambda x: x[0], var=2, out=1)
+            >> gr.cp_function(fun=lambda x0, x1: x0, var=2, out=1)
             >> gr.cp_marginals(
                 x0={"dist": "uniform", "loc": 0, "scale": 1},
                 x1={"dist": "uniform", "loc": 0, "scale": 1},
@@ -378,7 +378,7 @@ def setUp(self):
 
         self.md_mixed = (
             gr.Model()
-            >> gr.cp_function(fun=lambda x: x[0] + x[1], var=2, out=1)
+            >> gr.cp_function(fun=lambda x0, x1: x0 + x1, var=2, out=1)
             >> gr.cp_bounds(x0=(-1, +1))
             >> gr.cp_marginals(
                 x1={"dist": "uniform", "loc": 0, "scale": 1},
@@ -466,7 +466,7 @@ def setUp(self):
 
         self.md_mixed = (
             gr.Model()
-            >> gr.cp_function(fun=lambda x: x[0], var=3, out=1)
+            >> gr.cp_function(fun=lambda x0, x1, x2: x0, var=3, out=1)
             >> gr.cp_bounds(x2=(0, 1))
             >> gr.cp_marginals(
                 x0={"dist": "uniform", "loc": 0, "scale": 1},
@@ -582,14 +582,14 @@ def test_nls(self):
         ## Setup
         md_feat = (
             gr.Model()
-            >> gr.cp_function(fun=lambda x: x[0] * x[1] + x[2], var=3, out=1,)
+            >> gr.cp_function(fun=lambda x0, x1, x2: x0 * x1 + x2, var=3, out=1,)
             >> gr.cp_bounds(x0=[-1, +1], x2=[0, 0])
             >> gr.cp_marginals(x1=dict(dist="norm", loc=0, scale=1))
         )
 
         md_const = (
             gr.Model()
-            >> gr.cp_function(fun=lambda x: x[0], var=1, out=1)
+            >> gr.cp_function(fun=lambda x0: x0, var=1, out=1)
             >> gr.cp_bounds(x0=(-1, +1))
         )
@@ -653,13 +653,13 @@ def test_opt(self):
         md_bowl = (
             gr.Model("Constrained bowl")
             >> gr.cp_function(
-                fun=lambda x: x[0] ** 2 + x[1] ** 2, var=["x", "y"], out=["f"],
+                fun=lambda x, y: x ** 2 + y ** 2, var=["x", "y"], out=["f"],
             )
             >> gr.cp_function(
-                fun=lambda x: (x[0] + x[1] + 1), var=["x", "y"], out=["g1"],
+                fun=lambda x, y: (x + y + 1), var=["x", "y"], out=["g1"],
             )
             >> gr.cp_function(
-                fun=lambda x: -(-x[0] + x[1] - np.sqrt(2 / 10)),
+                fun=lambda x, y: -(-x + y - np.sqrt(2 / 10)),
                 var=["x", "y"],
                 out=["g2"],
             )
diff --git a/tests/test_fit.py b/tests/test_fit.py
index d35c746..ff62209 100644
--- a/tests/test_fit.py
+++ b/tests/test_fit.py
@@ -18,14 +18,11 @@ def setUp(self):
         ## Smooth model
         self.md_smooth = (
             gr.Model()
-            # >> gr.cp_function(fun=lambda x: [x, x + 1], var=["x"], out=["y", "z"])
-            >> gr.cp_vec_function(
-                fun=lambda df: gr.df_make(y=df.x, z=df.x + 1), var=["x"], out=["y", "z"]
-            )
+            >> gr.cp_function(fun=lambda x: [x, x + 1], var=["x"], out=["y", "z"])
+            # >> gr.cp_vec_function(fun=lambda df: gr.df_make(y=df.x, z=df.x + 1), var=["x"], out=["y", "z"])
             >> gr.cp_marginals(x={"dist": "uniform", "loc": 0, "scale": 2})
             >> gr.cp_copula_independence()
         )
-
         self.df_smooth = self.md_smooth >> gr.ev_df(df=pd.DataFrame(dict(x=[0, 1, 2])))
 
         ## Tree model
@@ -159,7 +156,7 @@ def test_nls(self):
         md_true = (
             gr.Model()
             >> gr.cp_function(
-                fun=lambda x: a_true * np.exp(x[0] * c_true) + x[1],
+                fun=lambda x, epsilon: a_true * np.exp(x * c_true) + epsilon,
                 var=["x", "epsilon"],
                 out=["y"],
             )
@@ -174,7 +171,9 @@ def test_nls(self):
         md_param = (
             gr.Model()
             >> gr.cp_function(
-                fun=lambda x: x[2] * np.exp(x[0] * x[1]), var=["x", "c", "a"], out=["y"]
+                fun=lambda x, c, a: a * np.exp(x * c),
+                var=["x", "c", "a"],
+                out=["y"]
             )
             >> gr.cp_bounds(c=[0, 4], a=[0.1, 2.0])
         )
@@ -191,7 +190,7 @@ def test_nls(self):
         md_unidet = (
             gr.Model()
             >> gr.cp_function(
-                fun=lambda x: x[2] / x[3] * np.exp(x[0] * x[1]),
+                fun=lambda x, c, a, z: a / z * np.exp(x * c),
                 var=["x", "c", "a", "z"],
                 out=["y"],
             )
@@ -222,7 +221,9 @@ def test_nls(self):
         md_fixed = (
             gr.Model()
             >> gr.cp_function(
-                fun=lambda x: x[2] * np.exp(x[0] * x[1]), var=["x", "c", "a"], out=["y"]
+                fun=lambda x, c, a: a * np.exp(x * c),
+                var=["x", "c", "a"],
+                out=["y"]
             )
             >> gr.cp_bounds(c=[0, 4], a=[1, 1])
         )
diff --git a/tests/test_mbi.py b/tests/test_mbi.py
index f7a10a8..a835147 100644
--- a/tests/test_mbi.py
+++ b/tests/test_mbi.py
@@ -135,13 +135,13 @@ def test_comp_function(self):
         with self.assertRaises(ValueError):
             # Cycle by input
             self.md >> gr.cp_function(
-                fun=lambda x: x[0], var=["y0"], out=1
-            ) >> gr.cp_function(fun=lambda x: x[0], var=1, out=["y0"])
+                fun=lambda x0: x0, var=["y0"], out=1
+            ) >> gr.cp_function(fun=lambda x0: x0, var=1, out=["y0"])
 
         with self.assertRaises(ValueError):
             # Non-unique output
             self.md >> gr.cp_function(
-                fun=lambda x: x[0], var=1, out=["y0"]
-            ) >> gr.cp_function(fun=lambda x: x[0], var=1, out=["y0"])
+                fun=lambda x0: x0, var=1, out=["y0"]
+            ) >> gr.cp_function(fun=lambda x0: x0, var=1, out=["y0"])
 
         ## Check vectorized builder
         md_vec = gr.comp_vec_function(
@@ -155,7 +155,7 @@ def test_comp_model(self):
         """Test model composition"""
         md_inner = (
             gr.Model("inner")
-            >> gr.cp_function(fun=lambda x: x[0] + x[1], var=2, out=1)
+            >> gr.cp_function(fun=lambda x0, x1: x0 + x1, var=2, out=1)
             >> gr.cp_marginals(x0=dict(dist="norm", loc=0, scale=1))
             >> gr.cp_copula_independence()
         )
diff --git a/tests/test_tail.py b/tests/test_tail.py
index 8579f86..f7d0c54 100644
--- a/tests/test_tail.py
+++ b/tests/test_tail.py
@@ -18,7 +18,7 @@ def setUp(self):
         self.md = (
             gr.Model()
             >> gr.cp_function(
-                fun=lambda x: self.beta_true * 2 - x[0] - np.sqrt(3) * x[1],
+                fun=lambda x0, x1: self.beta_true * 2 - x0 - np.sqrt(3) * x1,
                 var=2,
                 out=["g"],
             )
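Note on the new calling convention: the core change in grama/core.py (`self.func(*df.loc[ind, self.var])`) unpacks each input row, so a function registered via `gr.cp_function` now receives one scalar argument per entry of `var` instead of a single indexable array. Below is a minimal usage sketch (not part of the patch) built only from calls that appear in this diff; the model name "demo" and the input values are illustrative.

    import grama as gr

    ## Two scalar inputs, two outputs; pre-patch this would have been
    ## written as fun=lambda x: [x[0] + x[1], x[0] - x[1]]
    md = (
        gr.Model("demo")
        >> gr.cp_function(
            fun=lambda x, y: [x + y, x - y],  # one argument per entry of `var`
            var=["x", "y"],
            out=["f", "g"],
        )
    )

    ## Evaluate on a small input DataFrame; yields columns f and g
    df_res = md >> gr.ev_df(df=gr.df_make(x=[0.0, 1.0], y=[1.0, 2.0]))

Functions written against a whole DataFrame (e.g. the lambda in circuit_RLC.py) are instead registered with `cp_vec_function`, as this patch does there.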