diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 535edd87d..33682e4e3 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -73,7 +73,7 @@ jobs:
         - python-version: "3.8"
           # avoid uploading coverage for full matrix
           use_coverage: true
        - python-version: "3.8"
          # this is to avoid installing optional dependencies in all environments
-          optional-dependencies: tensorflow sympy torch scikit-learn
+          optional-dependencies: tensorflow sympy torch scikit-learn smt
     env:
       # uncomment this to debug Qt initialization errors
       # QT_DEBUG_PLUGINS: '1'
diff --git a/docs/source/chapt_surrogates/mlaiplugin.rst b/docs/source/chapt_surrogates/mlaiplugin.rst
index eefeaa00f..cecddb279 100644
--- a/docs/source/chapt_surrogates/mlaiplugin.rst
+++ b/docs/source/chapt_surrogates/mlaiplugin.rst
@@ -24,21 +24,19 @@ launched.
 Keras SavedModel format (folder containing .pb data files), or serialized
 to an architecture dictionary (.json) with separately saved model weights
 (.h5). Additionally, this tool supports PyTorch models saved in the standard
-format (.pt) and Scikit-learn models serialized in the standard Python pickle
-format (.pkl). The examples folder contains demonstrative training and class
-scripts for models containing no custom layer (see below for more information
-on adding custom layers), a custom layer with a preset normalization option
+format (.pt), and Scikit-learn and Surrogate Modeling Toolbox models serialized
+in the standard Python pickle format (.pkl). The examples folder contains
+demonstrative training and class scripts for models containing no custom layer
+(see below for more information on adding custom layers), a custom layer with
+a preset normalization option
 and a custom layer with a custom normalization function, as well as models
 saved in all supported file formats. To use this tool, users must train and
-export a machine leanring model and place the file in the appropriate folder
+export a machine learning model and place the file in the appropriate folder
 *user_ml_ai_plugins* in the working directory, as shown below. Optionally,
 users may save Keras models with custom attributes to display on the node,
 such as variable labels and bounds. While training a Keras model with custom
 attributes is not required to use the plugin tool, users must provide the
 necessary class script if the Keras model does contain a custom object (see
-below for further information on creating custom objects). PyTorch and
-Scikit-learn models do not have this requirement and the class script does not
-need to exist in the plugins folder. This model type is used in the same manner
+below for further information on creating custom objects). PyTorch,
+Scikit-learn, and Surrogate Modeling Toolbox models do not have this
+requirement, and the class script does not need to exist in the plugins
+folder. These model types are used in the same manner
 as Pymodel Plugins, per the workflow in Section :ref:`tutorial.surrogate.fs`.
 
 Custom Model Attributes
@@ -86,6 +84,13 @@ https://scikit-learn.org/stable/index.html and further information on deep learning
 capabilities as well:
 https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor.
 
+Surrogate Modeling Toolbox is an open-source Python package supporting a number
+of surrogate modeling methods, including gradient-enhanced neural network (GENN)
+models. GENN models train parameters by minimizing a modified least-squares
+estimator that accounts for partial derivative predictions, yielding better
+accuracy from fewer training points than non-gradient-enhanced models.
+Gradient-enhanced methods are applicable to use cases where system gradient
+data is known or obtainable, such as continuous physics-based problems like
+aerodynamics. If gradient data is not known, users may run the gradient
+generation tool provided within FOQUS and consult the tool documentation here:
+:ref:`gengrad`. Users may find further information on GENN models in the
+Surrogate Modeling Toolbox documentation:
+https://smt.readthedocs.io/en/stable/_src_docs/surrogate_models/genn.html.
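+
+As a condensed sketch of the training workflow (the array names are assumed to
+be prepared as in the example script *mea_column_model_training_smtgenn.py*
+described below, and the training options are abbreviated; see that script for
+the full set), a GENN model is trained on column-oriented data arrays and then
+annotated with the custom attributes FOQUS reads when building the node:
+
+.. code-block:: python
+
+    from types import SimpleNamespace
+    from smt.utils.neural_net.model import Model
+
+    # X is (n_x, n_m), Y is (n_y, n_m), J is (n_y, n_x, n_m) for n_m samples
+    model = Model.initialize(X.shape[0], Y.shape[0], deep=2, wide=6)
+    model.train(X=X, Y=Y, J=J, num_epochs=20, alpha=0.15, silent=True)
+
+    # attach labels and bounds so the FOQUS node can display them
+    model.custom = SimpleNamespace(
+        input_labels=xlabels,
+        output_labels=zlabels,
+        input_bounds=xdata_bounds,
+        output_bounds=zdata_bounds,
+        normalized=False,  # GENN normalizes internally during training
+    )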
+
 The examples files located in *FOQUS.examples.other_files.ML_AI_Plugin*
 show how users may train new models or re-save loaded models with a custom layer.
@@ -258,8 +263,7 @@ to obtain the correct output values for the entered inputs. To run the models,
 copy the appropriate model files or folders ('h5_model.h5', 'saved_model/',
 'json_model.json', 'json_model_weights.h5') and any custom layer scripts
 ('model_name.py') into the working directory folder 'user_ml_ai_models'.
-As mentioned earlier, PyTorch and Scikit-learn models only require the model file
-('pt_model.pt' or 'skl_model.pkl').
+As mentioned earlier, PyTorch, Scikit-learn, and Surrogate Modeling Toolbox models
+only require the model file ('pt_model.pt', 'skl_model.pkl', or 'smt_model.pkl').
 For example, the model name below is 'mea_column_model' and is saved in H5 format,
 and the files *FOQUS.examples.other_files.ML_AI_Plugin.TensorFlow_2-10_Models.mea_column_model.h5*
 and *FOQUS.examples.other_files.ML_AI_Plugin.mea_column_model.py* should be copied to
diff --git a/docs/source/references.rst b/docs/source/references.rst
index b2ef59860..3a102908c 100644
--- a/docs/source/references.rst
+++ b/docs/source/references.rst
@@ -59,4 +59,8 @@ S. Marcel, Y. Rodriguez, "Torchvision the machine-vision package of torch."" In
 
 .. _Buitinck_2013:
 
-L. Buitinck, G. Louppe, M.Blondel, et al., "API design for machine learning software: experiences from the scikit-learn project." European Conference on Machine Learning and Principles and Practices of Knowledge Discovery in Databases, September 2013.
\ No newline at end of file
+L. Buitinck, G. Louppe, M. Blondel, et al., "API design for machine learning software: experiences from the scikit-learn project." European Conference on Machine Learning and Principles and Practices of Knowledge Discovery in Databases, September 2013.
+
+.. _Bouhlel_2019:
+
+M. A. Bouhlel, J. T. Hwang, N. Bartoli, et al., "A Python surrogate modeling framework with derivatives." Advances in Engineering Software, Vol. 135, 102662, September 2019.
\ No newline at end of file
diff --git a/examples/other_files/ML_AI_Plugin/Other_MLAI_Models/mea_column_model_smt.pkl b/examples/other_files/ML_AI_Plugin/Other_MLAI_Models/mea_column_model_smt.pkl
new file mode 100644
index 000000000..62d5a26e2
Binary files /dev/null and b/examples/other_files/ML_AI_Plugin/Other_MLAI_Models/mea_column_model_smt.pkl differ
diff --git a/examples/other_files/ML_AI_Plugin/mea_column_model_training_smtgenn.py b/examples/other_files/ML_AI_Plugin/mea_column_model_training_smtgenn.py
new file mode 100644
index 000000000..b181f875c
--- /dev/null
+++ b/examples/other_files/ML_AI_Plugin/mea_column_model_training_smtgenn.py
@@ -0,0 +1,122 @@
+#################################################################################
+# FOQUS Copyright (c) 2012 - 2023, by the software owners: Oak Ridge Institute
+# for Science and Education (ORISE), TRIAD National Security, LLC., Lawrence
+# Livermore National Security, LLC., The Regents of the University of
+# California, through Lawrence Berkeley National Laboratory, Battelle Memorial
+# Institute, Pacific Northwest Division through Pacific Northwest National
+# Laboratory, Carnegie Mellon University, West Virginia University, Boston
+# University, the Trustees of Princeton University, The University of Texas at
+# Austin, URS Energy & Construction, Inc., et al. All rights reserved.
+#
+# Please see the file LICENSE.md for full copyright and license information,
+# respectively. This file is also available online at the URL
+# "https://github.com/CCSI-Toolset/FOQUS".
+#################################################################################
+import numpy as np
+import pandas as pd
+from smt.utils.neural_net.model import Model
+import pickle
+from types import SimpleNamespace
+
+
+# Example follows the sequence below:
+# 1) Code at end of file imports the data and gradient files
+# 2) create_model() transposes the training arrays into the column-oriented
+#    form expected by smt and trains the network
+# 3) Custom attributes (labels, bounds, normalized flag) are attached to the
+#    trained model as a SimpleNamespace object for FOQUS to display on the node
+# 4) Code at end of file saves, loads and tests the model via pickle
+
+
+# method to create model
+def create_model(x_train, z_train, grad_train):
+
+    # already have X, Y and J, don't need to create and populate GENN() to
+    # load SMT data into Model(); GENN() doesn't support multiple outputs
+
+    # Model() does support multiple outputs, so we just need to transpose the
+    # arrays into the column-oriented form that Model() expects
+    # we have x_train = (n_m, n_x), z_train = (n_m, n_y) and grad_train = (n_y, n_m, n_x)
+    n_m, n_x = np.shape(x_train)
+    _, n_y = np.shape(z_train)
+
+    # check dimensions using grad_train
+    assert np.shape(grad_train) == (n_y, n_m, n_x)
+
+    # transpose into X = (n_x, n_m), Y = (n_y, n_m) and J = (n_y, n_x, n_m);
+    # note that np.reshape would keep row-major element order and scramble the
+    # sample/feature pairing, so true transposes are required here
+    X = np.transpose(x_train)
+    Y = np.transpose(z_train)
+    J = np.transpose(grad_train, (0, 2, 1))
+
+    # set up and train model
+    model = Model.initialize(
+        X.shape[0], Y.shape[0], deep=2, wide=6
+    )  # 2 hidden layers with 6 neurons each
+    model.train(
+        X=X,  # input data
+        Y=Y,  # output data
+        J=J,  # gradient data
+        num_iterations=25,  # number of optimizer iterations per mini-batch
+        mini_batch_size=int(
+            np.floor(n_m / 5)
+        ),  # used to divide data into training batches (use for large data sets)
+        num_epochs=20,  # number of passes through data
+        alpha=0.15,  # learning rate that controls optimizer step size
+        beta1=0.99,  # ADAM optimizer decay rate for first-moment estimates
+        beta2=0.99,  # ADAM optimizer decay rate for second-moment estimates
+        lambd=0.1,  # lambd = 0. = no regularization, lambd > 0 = regularization
+        gamma=0.0001,  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+        seed=None,  # set to value for reproducibility
+        silent=True,  # set to True to suppress training output
+    )
+
+    # attach the custom attributes FOQUS displays on the node; the labels and
+    # bounds reference the module-level variables defined below
+    model.custom = SimpleNamespace(
+        input_labels=xlabels,
+        output_labels=zlabels,
+        input_bounds=xdata_bounds,
+        output_bounds=zdata_bounds,
+        normalized=False,  # GENN models normalize internally during training,
+        # so this flag should always be False
+    )
+
+    return model
+
+
+# Main code
+
+# import data
+data = pd.read_csv(r"MEA_carbon_capture_dataset_mimo.csv")
+grad0_data = pd.read_csv(r"gradients_output0.csv", index_col=0)  # ignore 1st col
+grad1_data = pd.read_csv(r"gradients_output1.csv", index_col=0)  # ignore 1st col
+
+xdata = data.iloc[:, :6]  # there are 6 input variables/columns
+zdata = data.iloc[:, 6:]  # the rest are output variables/columns
+xlabels = xdata.columns.tolist()  # set input labels as a list from pandas
+zlabels = zdata.columns.tolist()  # set output labels as a list from pandas
+xdata_bounds = {i: (xdata[i].min(), xdata[i].max()) for i in xdata}  # x bounds
+zdata_bounds = {j: (zdata[j].min(), zdata[j].max()) for j in zdata}  # z bounds
+
+xmax, xmin = xdata.max(axis=0), xdata.min(axis=0)
+zmax, zmin = zdata.max(axis=0), zdata.min(axis=0)
+xdata, zdata = np.array(xdata), np.array(zdata)  # (n_m, n_x) and (n_m, n_y)
+gdata = np.stack([np.array(grad0_data), np.array(grad1_data)])  # (2, n_m, n_x)
+
+model_data = np.concatenate(
+    (xdata, zdata), axis=1
+)  # single combined Numpy array, as in the other example scripts
+
+# split the combined array back into the x and z training arrays
+xdata = model_data[:, :-2]  # all but the last 2 columns are inputs
+zdata = model_data[:, -2:]  # the last 2 columns are outputs
+
+# create model
+model = create_model(x_train=xdata, z_train=zdata, grad_train=gdata)
+
+# save model as pickle format
+with open("mea_column_model_smt.pkl", "wb") as file:
+    pickle.dump(model, file)
+
+# load model as pickle format
+with open("mea_column_model_smt.pkl", "rb") as file:
+    loaded_model = pickle.load(file)
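+
+# optional quick check (illustrative addition, not required by FOQUS): evaluate
+# the reloaded model at the first training point; as in the FOQUS plugin code,
+# Model.evaluate() expects a single column-oriented sample of shape (n_x, 1)
+sample = np.reshape(xdata[0], (-1, 1))
+print(loaded_model.evaluate(sample))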
diff --git a/foqus_lib/conftest.py b/foqus_lib/conftest.py
index ec0e0ff01..783095f87 100644
--- a/foqus_lib/conftest.py
+++ b/foqus_lib/conftest.py
@@ -165,6 +165,7 @@ def install_ml_ai_model_files(
         ts_models_base_path / "mea_column_model_customnormform_json_weights.h5",
         other_models_base_path / "mea_column_model_customnormform_pytorch.pt",
         other_models_base_path / "mea_column_model_customnormform_scikitlearn.pkl",
+        other_models_base_path / "mea_column_model_smt.pkl",
     ]:
         shutil.copy2(path, models_dir)
     # unzip the zip file (could be generalized later to more files if needed)
diff --git a/foqus_lib/framework/graph/node.py b/foqus_lib/framework/graph/node.py
index 3fbcf038d..1d814ee73 100644
--- a/foqus_lib/framework/graph/node.py
+++ b/foqus_lib/framework/graph/node.py
@@ -149,26 +149,52 @@ def attempt_load_sklearn(try_imports=True):
         import sklearn
         import pickle
 
-        pickle_load = pickle.load
+        skl_pickle_load = pickle.load
 
     # throw warning if manually failed for test or if package actually not available
     except (AssertionError, ImportError, ModuleNotFoundError):
         # if sklearn is not available, create a proxy function that will
         # raise an exception whenever code tries to use `load()` at runtime
-        def pickle_load(*args, **kwargs):
+        def skl_pickle_load(*args, **kwargs):
             raise ModuleNotFoundError(
                 f"`load()` was called with args={args},"
                 "kwargs={kwargs} but `sklearn` is not available"
             )
 
-    return pickle_load
+    return skl_pickle_load
+
+
+def attempt_load_smt(try_imports=True):
+    try:
+        assert try_imports  # if False will auto-trigger exceptions
+        # smt should be installed, but not required for non ML/AI models
+        import smt
+        import pickle
+
+        smt_pickle_load = pickle.load
+
+    # throw warning if manually failed for test or if package actually not available
+    except (AssertionError, ImportError, ModuleNotFoundError):
+        # if smt is not available, create a proxy function that will
+        # raise an exception whenever code tries to use `load()` at runtime
+        def smt_pickle_load(*args, **kwargs):
+            raise ModuleNotFoundError(
+                f"`load()` was called with args={args}, "
+                f"kwargs={kwargs} but `smt` is not available"
+            )
+
+    return smt_pickle_load
 
 
 # attempt to load optional dependenices for node script
+
+# pickle.load is captured separately for sklearn and smt so that each package
+# can load or fail independently of the other
 load, json_load = attempt_load_tensorflow()
 parse, symbol, solve = attempt_load_sympy()
 torch_load, torch_tensor, torch_float = attempt_load_pytorch()
-pickle_load = attempt_load_sklearn()
+skl_pickle_load = attempt_load_sklearn()
+smt_pickle_load = attempt_load_smt()
 
 # pylint: enable=import-error
@@ -296,13 +322,21 @@ def __init__(self, model, trainer):
             model_input_size = self.model.n_features_in_
             model_output_size = self.model.n_outputs_
 
+        elif self.trainer == "smt":
+            # set the custom attributes object attached to the model
+            custom_layer = self.model.custom
+            # set the model input and output sizes
+            model_input_size = self.model._n_x
+            model_output_size = self.model._n_y
+
         else:  # this shouldn't occur, adding failsafe just in case
             raise AttributeError(
                 "Unknown file type: " + self.trainer + ", this "
                 "should not have occurred. Please contact the "
                 "FOQUS developers if this error occurs; the "
-                "trainer should be set internally to `keras`, 'torch' or "
-                "`sklearn` and should not be able to take any other value."
+                "trainer should be set internally to `keras`, `torch`, "
+                "`sklearn` or `smt` and should not be able to take any other "
+                "value."
             )
 
         self.custom_layer = (
@@ -615,13 +649,18 @@ def sub_symbols(i):
             self.scaled_outputs = self.model.predict(
                 np.array(self.scaled_inputs, ndmin=2)
             )[0]
+        elif self.trainer == "smt":
+            # smt's Model.evaluate() expects a single column-oriented sample
+            # of shape (n_x, 1)
+            self.scaled_outputs = self.model.evaluate(
+                np.reshape(np.array(self.scaled_inputs, ndmin=2), (self.model._n_x, 1))
+            )
         else:  # this shouldn't occur, adding failsafe just in case
             raise AttributeError(
                 "Unknown file type: " + self.trainer + ", this "
                 "should not have occurred. Please contact the "
                 "FOQUS developers if this error occurs; the "
-                "trainer should be set internally to `keras`, 'torch' or "
-                "`sklearn` and should not be able to take any other value."
+                "trainer should be set internally to `keras`, `torch`, "
+                "`sklearn` or `smt` and should not be able to take any other "
+                "value."
             )
 
         outidx = 0
@@ -1173,7 +1212,7 @@ def setSim(self, newType=None, newModel=None, force=False, ids=None):
                 elif os.path.exists(
                     os.path.join(os.getcwd(), str(self.modelName) + ".pkl")
                 ):
-                    extension = ".pkl"  # this is for Sci Kit Learn models
+                    extension = ".pkl"  # this is for Scikit-learn and SMT models
                 else:  # assume it's a folder with no extension
                     extension = ""
@@ -1184,9 +1223,41 @@ def setSim(self, newType=None, newModel=None, force=False, ids=None):
             elif (
                 extension == ".pkl"
             ):  # use importlib/pickle loading syntax for SciKitLearn models
-                with open(str(self.modelName) + extension, "rb") as file:
-                    self.model = pickle_load(file)
-                trainer = "sklearn"
+                pickle_loaded = (
+                    False  # use a flag so we don't load the model twice
+                )
+                # note: pickle.load itself raises ModuleNotFoundError during
+                # unpickling if the model's package is not importable
+                try:  # try Scikit-learn first
+                    if not pickle_loaded:
+                        with open(str(self.modelName) + extension, "rb") as file:
+                            self.model = skl_pickle_load(file)
+                        pickle_loaded = True
+                except ModuleNotFoundError as e:
+                    _logger.info(
+                        e
+                    )  # logs that sklearn is not installed, but doesn't fail outright
+
+                try:  # try SMT next
+                    if not pickle_loaded:
+                        with open(str(self.modelName) + extension, "rb") as file:
+                            self.model = smt_pickle_load(file)
+                        pickle_loaded = True
+                except ModuleNotFoundError as e:
+                    _logger.info(
+                        e
+                    )  # logs that smt is not installed, but doesn't fail outright
+
+                # now check which model type was unpickled
+                model_type_name = str(type(self.model))
+                if "sklearn" in model_type_name:
+                    trainer = "sklearn"
+                elif "smt" in model_type_name:
+                    trainer = "smt"
+                else:  # unsupported model type was unpickled
+                    raise AttributeError(
+                        f"Unknown model type: {model_type_name!r}. Only "
+                        "sklearn MLPRegressor and smt GENN (Model) objects are "
+                        "currently supported."
+                    )
             elif extension != ".json":  # use standard Keras load method
                 try:  # see if custom layer script exists
                     module = import_module(str(self.modelName))  # contains CustomLayer
@@ -1726,9 +1797,41 @@ def runPymodelMLAI(self):
             elif (
                 extension == ".pkl"
             ):  # use importlib/pickle loading syntax for SciKitLearn models
-                with open(str(self.modelName) + extension, "rb") as file:
-                    self.model = pickle_load(file)
-                trainer = "sklearn"
+                pickle_loaded = (
+                    False  # use a flag so we don't load the model twice
+                )
+                # note: pickle.load itself raises ModuleNotFoundError during
+                # unpickling if the model's package is not importable
+                try:  # try Scikit-learn first
+                    if not pickle_loaded:
+                        with open(str(self.modelName) + extension, "rb") as file:
+                            self.model = skl_pickle_load(file)
+                        pickle_loaded = True
+                except ModuleNotFoundError as e:
+                    _logger.info(
+                        e
+                    )  # logs that sklearn is not installed, but doesn't fail outright
+
+                try:  # try SMT next
+                    if not pickle_loaded:
+                        with open(str(self.modelName) + extension, "rb") as file:
+                            self.model = smt_pickle_load(file)
+                        pickle_loaded = True
+                except ModuleNotFoundError as e:
+                    _logger.info(
+                        e
+                    )  # logs that smt is not installed, but doesn't fail outright
+
+                # now check which model type was unpickled
+                model_type_name = str(type(self.model))
+                if "sklearn" in model_type_name:
+                    trainer = "sklearn"
+                elif "smt" in model_type_name:
+                    trainer = "smt"
+                else:  # unsupported model type was unpickled
+                    raise AttributeError(
+                        f"Unknown model type: {model_type_name!r}. Only "
+                        "sklearn MLPRegressor and smt GENN (Model) objects are "
+                        "currently supported."
+                    )
             elif extension != ".json":  # use standard Keras load method
                 try:  # see if custom layer script exists
                     module = import_module(str(self.modelName))  # contains CustomLayer
diff --git a/foqus_lib/framework/ml_ai_models/mlaiSearch.py b/foqus_lib/framework/ml_ai_models/mlaiSearch.py
index 05aaaa214..e20ac1b05 100644
--- a/foqus_lib/framework/ml_ai_models/mlaiSearch.py
+++ b/foqus_lib/framework/ml_ai_models/mlaiSearch.py
@@ -15,8 +15,7 @@
 """ mlaiSearch.py
 
 * This class looks for ml_ai model files and creates a list containing the
-  NN model names. The ml_ai models are identified by a certain string contained
-  in the file name. Files containing ml_ai models should have a .h5 extension.
+  NN model names.
 
 John Eslick, Carnegie Mellon University, 2014
 """
diff --git a/foqus_lib/gui/tests/test_ml_ai.py b/foqus_lib/gui/tests/test_ml_ai.py
index 122ca7f20..eb99d981f 100644
--- a/foqus_lib/gui/tests/test_ml_ai.py
+++ b/foqus_lib/gui/tests/test_ml_ai.py
@@ -266,3 +266,18 @@ def test_flowsheet_run_successful(
         text_when_success: str = "Finished Single Simulation... Success",
     ):
         assert text_when_success in statusbar_message
+
+    def test_load_and_run_measmt(self, active_session, simnode):
+        pytest.importorskip("smt", reason="smt not installed")
+        pytest.importorskip("sympy", reason="sympy not installed")
+        # set sim name and confirm it's the correct model
+        simnode.simNameBox.setCurrentIndex(8)
+        assert simnode.simNameBox.currentText() == "mea_column_model_smt"
+
+    def test_flowsheet_run_successful(
+        self,
+        trigger_flowsheet_run_action,
+        statusbar_message: str,
+        text_when_success: str = "Finished Single Simulation... Success",
+    ):
+        assert text_when_success in statusbar_message
diff --git a/foqus_lib/unit_test/node_test.py b/foqus_lib/unit_test/node_test.py
index e1fc2db0d..c7833a115 100644
--- a/foqus_lib/unit_test/node_test.py
+++ b/foqus_lib/unit_test/node_test.py
@@ -17,6 +17,7 @@
     attempt_load_sympy,
     attempt_load_pytorch,
     attempt_load_sklearn,
+    attempt_load_smt,
     pymodel_ml_ai,
     Node,
     NodeEx,
@@ -103,11 +104,20 @@ def test_import_sklearn_failure(self):
         # but it's good to test the exception anyways
 
         # method loaded from node module as * import
-        pickle_load = attempt_load_sklearn(try_imports=False)
+        skl_pickle_load = attempt_load_sklearn(try_imports=False)
 
         # check that the returned functions print the expected warnings
         with pytest.raises(ModuleNotFoundError):
-            pickle_load(None)
+            skl_pickle_load(None)
+
+    def test_import_smt_failure(self):
+
+        # method loaded from node module as * import
+        smt_pickle_load = attempt_load_smt(try_imports=False)
+
+        # check that the returned functions print the expected warnings
+        with pytest.raises(ModuleNotFoundError):
+            smt_pickle_load(None)
 
     def test_import_tensorflow_success(self):
         # skip this test if tensorflow is not available
@@ -160,12 +170,28 @@ def test_import_sklearn_success(self):
         pytest.importorskip("sklearn", reason="sklearn not installed")
 
         # method loaded from node module as * import
-        pickle_load = attempt_load_sklearn(try_imports=True)
+        skl_pickle_load = attempt_load_sklearn(try_imports=True)
 
         # check that the returned functions expect the correct input as a way
         # of confirming that the class (function) types are correct
         with pytest.raises(TypeError):
-            pickle_load(None)  # should fail to find 'read' and 'readline' attributes
+            skl_pickle_load(
+                None
+            )  # should fail to find 'read' and 'readline' attributes
+
+    def test_import_smt_success(self):
+        # skip this test if smt is not available
+        pytest.importorskip("smt", reason="smt not installed")
+
+        # method loaded from node module as * import
+        smt_pickle_load = attempt_load_smt(try_imports=True)
+
+        # check that the returned functions expect the correct input as a way
+        # of confirming that the class (function) types are correct
+        with pytest.raises(TypeError):
+            smt_pickle_load(
+                None
+            )  # should fail to find 'read' and 'readline' attributes
 
 
 # ----------------------------------------------------------------------------
@@ -403,6 +429,28 @@ def example_7(self, model_files):  # custom layer with custom normalization form
 
         return model
 
+    @pytest.fixture(scope="function")
+    def example_8(self, model_files):  # smt model with custom attributes
+        # no tests using this fixture should run if smt is not installed
+        pytest.importorskip("smt", reason="smt not installed")
+        # the models are all loaded a single time, and copies of individual
+        # models are modified to test model exceptions
+
+        smt_pickle_load = attempt_load_smt()  # alias for load method
+
+        # get model files from previously defined model_files pathlist
+        model_pkl = [
+            path
+            for path in model_files
+            if str(path).endswith("mea_column_model_smt.pkl")
+        ]
+
+        # smt model with custom attributes attached
+        with open(model_pkl[0], "rb") as file:
+            model = smt_pickle_load(file)
+
+        return model
+
 
 # ----------------------------------------------------------------------------
 # this set of tests builds and runs the pymodel class functionality
@@ -465,6 +513,14 @@ def test_build_and_run_as_expected_7(self, example_7):
         test_pymodel = pymodel_ml_ai(example_7, trainer="sklearn")
         test_pymodel.run()
 
+    def test_build_and_run_as_expected_8(self, example_8):
+        # only run if smt is available; test run for smt example
+        pytest.importorskip("smt", reason="smt not installed")
+        # test that the loaded model runs with no issues when unmodified;
+        # as in the other tests, an alias is created to preserve the fixture
+        test_pymodel = pymodel_ml_ai(example_8, trainer="smt")
+        test_pymodel.run()
+
     def test_build_invalid_trainer_type(self, example_1):
         # note, this should never happen since users can never set this value
         with pytest.raises(
@@ -473,7 +529,7 @@ def test_build_invalid_trainer_type(self, example_1):
             "notavalidtype, this should not have occurred. "
             "Please contact the FOQUS developers if this error "
             "occurs; the trainer should be set internally to "
-            "`keras`, 'torch' or `sklearn` and should not be "
+            "`keras`, `torch`, `sklearn` or `smt` and should not be "
             "able to take any other value.",
         ):
             test_pymodel = pymodel_ml_ai(example_1, trainer="notavalidtype")
@@ -489,7 +545,7 @@ def test_run_invalid_trainer_type(self, example_1):
             "notavalidtype, this should not have occurred. "
             "Please contact the FOQUS developers if this error "
             "occurs; the trainer should be set internally to "
-            "`keras`, 'torch' or `sklearn` and should not be "
+            "`keras`, `torch`, `sklearn` or `smt` and should not be "
             "able to take any other value.",
         ):
             test_pymodel.run()
@@ -1628,3 +1684,36 @@ def test_runPymodelMLAI_example7(self, node, model_files):
         assert getattr(node.gr.output[node.name][vkey], attribute) == getattr(
             v, attribute
         )
+
+    def test_runPymodelMLAI_example8(self, node, model_files):
+        # skip this test if smt is not available
+        pytest.importorskip("smt", reason="smt not installed")
+        # change directories
+        curdir = os.getcwd()
+        os.chdir(os.path.dirname(model_files[0]))
+        # manually add ML AI model to test
+        node.setSim(newModel="mea_column_model_smt", newType=5)
+        node.runCalc()  # covers node.runMLAIPlugin()
+        os.chdir(curdir)
+
+        inst = pymodel_ml_ai(node.model, trainer="smt")
+        inst.run()
+
+        for attribute in [
+            "dtype",
+            "min",
+            "max",
+            "default",
+            "unit",
+            "set",
+            "desc",
+            "tags",
+        ]:
+            for vkey, v in inst.inputs.items():
+                assert getattr(node.gr.input[node.name][vkey], attribute) == getattr(
+                    v, attribute
+                )
+            for vkey, v in inst.outputs.items():
+                assert getattr(node.gr.output[node.name][vkey], attribute) == getattr(
+                    v, attribute
+                )