From 62f663d2f8fdb7cc3e3e085612ee4159233b2364 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 6 Apr 2021 14:22:00 -0600 Subject: [PATCH 01/51] restructure PostProcessor to use BaseEntities and BaseInterface Structures --- framework/Models/Factory.py | 39 +----- framework/Models/PostProcessors/Factory.py | 61 ++++++++ .../Models/PostProcessors/PostProcessor.py | 102 ++++---------- .../PostProcessors/PostProcessorInterface.py | 130 ++++++++++++++++++ 4 files changed, 221 insertions(+), 111 deletions(-) create mode 100644 framework/Models/PostProcessors/Factory.py create mode 100644 framework/Models/PostProcessors/PostProcessorInterface.py diff --git a/framework/Models/Factory.py b/framework/Models/Factory.py index 69ff15ed4f..ea60541f92 100644 --- a/framework/Models/Factory.py +++ b/framework/Models/Factory.py @@ -26,42 +26,7 @@ from .EnsembleModel import EnsembleModel from .HybridModels import HybridModel from .HybridModels import LogicalModel - -#### PostProcessors -from .PostProcessors import PostProcessor -from .PostProcessors import FTImporter -from .PostProcessors import BasicStatistics -from .PostProcessors import LimitSurface -from .PostProcessors import Metric -from .PostProcessors import ETImporter -from .PostProcessors.DataMining import DataMining -from .PostProcessors import SafestPoint -from .PostProcessors import ValueDuration -from .PostProcessors import SampleSelector -from .PostProcessors import ImportanceRank -from .PostProcessors import CrossValidation -from .PostProcessors import LimitSurfaceIntegral -from .PostProcessors import FastFourierTransform -from .PostProcessors.ExternalPostProcessor import ExternalPostProcessor -from .PostProcessors import InterfacedPostProcessor -from .PostProcessors.TopologicalDecomposition import TopologicalDecomposition -from .PostProcessors import DataClassifier -from .PostProcessors.ComparisonStatisticsModule import ComparisonStatistics -from .PostProcessors import RealizationAverager -from .PostProcessors.ParetoFrontierPostProcessor import ParetoFrontier -from .PostProcessors.MCSimporter import MCSImporter -from .PostProcessors import EconomicRatio -# from .PostProcessors import RavenOutput # deprecated for now - -## These utilize the optional prequisite library PySide, so don't error if they -## do not import appropriately. -try: - from .PostProcessors.TopologicalDecomposition import QTopologicalDecomposition - from .PostProcessors.DataMining import QDataMining - renaming = {'QTopologicalDecomposition': 'TopologicalDecomposition', - 'QDataMining': 'DataMining'} -except ImportError: - renaming = {} +from .PostProcessor import PostProcessor factory = EntityFactory('Model', needsRunInfo=True) factory.registerAllSubtypes(Model, alias=renaming) @@ -72,8 +37,6 @@ classType.generateValidateDict() classType.specializeValidateDict() -factory.registerType('External', ExternalPostProcessor) - def validate(className, role, what): """ This is the general interface for the validation of a model usage diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py new file mode 100644 index 0000000000..123141bdfc --- /dev/null +++ b/framework/Models/PostProcessors/Factory.py @@ -0,0 +1,61 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Factory for generating the instances of the PostProcessors module
+"""
+
+from EntityFactoryBase import EntityFactory
+from .PostProcessors import PostProcessorInterface
+from .PostProcessors import FTImporter
+from .PostProcessors import BasicStatistics
+from .PostProcessors import LimitSurface
+from .PostProcessors import Metric
+from .PostProcessors import ETImporter
+from .PostProcessors.DataMining import DataMining
+from .PostProcessors import SafestPoint
+from .PostProcessors import ValueDuration
+from .PostProcessors import SampleSelector
+from .PostProcessors import ImportanceRank
+from .PostProcessors import CrossValidation
+from .PostProcessors import LimitSurfaceIntegral
+from .PostProcessors import FastFourierTransform
+from .PostProcessors.ExternalPostProcessor import ExternalPostProcessor
+from .PostProcessors import InterfacedPostProcessor
+from .PostProcessors.TopologicalDecomposition import TopologicalDecomposition
+from .PostProcessors import DataClassifier
+from .PostProcessors.ComparisonStatisticsModule import ComparisonStatistics
+from .PostProcessors import RealizationAverager
+from .PostProcessors.ParetoFrontierPostProcessor import ParetoFrontier
+from .PostProcessors.MCSimporter import MCSImporter
+from .PostProcessors import EconomicRatio
+## These utilize the optional prerequisite library PySide, so don't error if they
+## do not import appropriately. 
+try: + from .PostProcessors.TopologicalDecomposition import QTopologicalDecomposition + from .PostProcessors.DataMining import QDataMining + renaming = {'QTopologicalDecomposition': 'TopologicalDecomposition', + 'QDataMining': 'DataMining'} +except ImportError: + renaming = {} + +factory = EntityFactory('PostProcessor', needsRunInfo=True) +factory.registerAllSubtypes(Model, alias=renaming) + +## Here the class methods are called to fill the information about the usage of the classes +for className in factory.knownTypes(): + classType = factory.returnClass(className) + classType.generateValidateDict() + classType.specializeValidateDict() + +factory.registerType('External', ExternalPostProcessor) diff --git a/framework/Models/PostProcessors/PostProcessor.py b/framework/Models/PostProcessors/PostProcessor.py index 6b741deb39..a21fac2741 100644 --- a/framework/Models/PostProcessors/PostProcessor.py +++ b/framework/Models/PostProcessors/PostProcessor.py @@ -20,14 +20,14 @@ #External Modules------------------------------------------------------------------------------------ import os -import abc -import copy #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ import Runners from Models import Model from Decorators.Parallelization import Parallel +from utils import utils, InputTypes +from .PostProcessors import factory as interfaceFactory #Internal Modules End-------------------------------------------------------------------------------- class PostProcessor(Model): @@ -41,11 +41,17 @@ def getInputSpecification(cls): Method to get a reference to a class that specifies the input data for class cls. @ In, cls, the class for which we are retrieving the specification - @ Out, inputSpecification, InputData.ParameterInput, class to use for + @ Out, spec, InputData.ParameterInput, class to use for specifying input of cls. """ - inputSpecification = super(PostProcessor, cls).getInputSpecification() - return inputSpecification + spec = super().getInputSpecification() + validTypes = list(interfaceFactory.knownTypes()) + typeEnum = InputTypes.makeEnumType(validTypes) + for name in typeEnum: + pp = interfaceFactory.returnClass(name) + subSpec = pp.getInputSpecification() + spec.mergeSub(subSpec) + return spec @classmethod def generateValidateDict(cls): @@ -54,7 +60,7 @@ def generateValidateDict(cls): @ In, None @ Out, None """ - super(PostProcessor, cls).generateValidateDict() + super().generateValidateDict() @classmethod def specializeValidateDict(cls): @@ -99,26 +105,17 @@ def specializeValidateDict(cls): cls.validateDict['Function' ][0]['required' ] = False cls.validateDict['Function' ][0]['multiplicity'] = 1 - def __init__(self): + def __init__(self, runInfoDict): """ Constructor @ In, runInfoDict, dict, the dictionary containing the runInfo (read in the XML input file) @ Out, None """ - super().__init__() + super().__init__(self,runInfoDict) self.inputCheckInfo = [] # List of tuple, i.e input objects info [('name','type')] self.action = None # action - self.workingDir = '' # path for working directory self.printTag = 'POSTPROCESSOR MODEL' - self.outputDataset = False # True if the user wants to dump the outputs to dataset - self.validDataType = ['PointSet','HistorySet'] # The list of accepted types of DataObject - ## Currently, we have used both DataObject.addRealization and DataObject.load to - ## collect the PostProcessor returned outputs. 
DataObject.addRealization is used to - ## collect single realization, while DataObject.load is used to collect multiple realizations - ## However, the DataObject.load can not be directly used to collect single realization - ## One possible solution is all postpocessors return a list of realizations, and we only - ## use addRealization method to add the collections into the DataObjects - self.outputMultipleRealizations = False + self._pp = None def _handleInput(self, paramInput): """ @@ -126,7 +123,10 @@ def _handleInput(self, paramInput): @ In, paramInput, InputData.ParameterInput, the already parsed input. @ Out, None """ - Model._handleInput(self, paramInput) + super()._handleInput(self, paramInput) + reqType = paramInput.parameterValues['subType'] + self._pp = interfaceFactory.returnInstance (reqType) + self._pp._handleInput(paramInput) def initialize(self, runInfo, inputs, initDict=None): """ @@ -135,11 +135,9 @@ def initialize(self, runInfo, inputs, initDict=None): @ In, inputs, list, it is a list containing whatever is passed with an input role in the step @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model """ - self.inputs = inputs - if 'stepName' in runInfo: - self.workingDir = os.path.join(runInfo['WorkingDir'],runInfo['stepName']) #generate current working dir - else: - self.workingDir = runInfo['WorkingDir'] + super().initialize(runInfo, inputs, initDict) + self._pp.initialize(runInfo, inputs, initDict) + settings = self._pp.getSettings() self.inputCheckInfo = [(inp.name, inp.type) for inp in inputs] def createNewInput(self,myInput,samplerType,**kwargs): @@ -154,26 +152,6 @@ def createNewInput(self,myInput,samplerType,**kwargs): """ return myInput - def inputToInternal(self, currentInput): - """ - Method to convert an input object into the internal format that is - understandable by the PostProcessor. - @ In, currentInput, object, an object that needs to be converted - @ Out, inputToInternal, list, list of current inputs - """ - return [(copy.deepcopy(currentInput))] - - @abc.abstractmethod - def run(self, input): - """ - This method executes the postprocessor action. - @ In, input, object, object containing the data to process. 
- Should avoid to use (inputToInternal output), and passing xarray directly/dataset - Possible inputs include: dict, xarray.Dataset, pd.DataFrame - @ Out, dict, xarray.Dataset, pd.DataFrame --> I think we can avoid collectoutput in the plugin pp - """ - pass - @Parallel() def evaluateSample(self, myInput, samplerType, kwargs): """ @@ -188,10 +166,10 @@ def evaluateSample(self, myInput, samplerType, kwargs): the second item will be the output of this model given the specified inputs """ - Input = self.createNewInput(myInput,samplerType, **kwargs) - if Input is not None and len(Input) == 0: - Input = None - returnValue = (Input, self.run(Input)) + ppInput = self.createNewInput(myInput,samplerType, **kwargs) + if ppInput is not None and len(ppInput) == 0: + ppInput = None + returnValue = (ppInput, self._pp.run(ppInput)) return returnValue def submit(self,myInput,samplerType,jobHandler,**kwargs): @@ -207,9 +185,9 @@ def submit(self,myInput,samplerType,jobHandler,**kwargs): @ Out, None """ kwargs['forceThreads'] = True - Model.submit(self,myInput, samplerType, jobHandler,**kwargs) + super().submit(self,myInput, samplerType, jobHandler,**kwargs) - def collectOutput(self,finishedJob,output,options=None): + def collectOutput(self, finishedJob, output, options=None): """ Method that collects the outputs from the "run" method of the PostProcessor @ In, finishedJob, InternalRunner object, instance of the run just finished @@ -218,31 +196,9 @@ def collectOutput(self,finishedJob,output,options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - if output.type not in self.validDataType: - self.raiseAnError(IOError, 'Output type', str(output.type), 'is not allowed!') outputCheckInfo = (output.name, output.type) if outputCheckInfo in self.inputCheckInfo: self.raiseAnError(IOError, 'DataObject',output.name,'is used as both input and output of', \ self.interface.printTag, 'This is not allowed! Please use different DataObjet as output') - evaluation = finishedJob.getEvaluation() - if isinstance(evaluation, Runners.Error): - self.raiseAnError(RuntimeError, "No available output to collect (run possibly not finished yet)") - outputRealization = evaluation[1] - - if output.type in ['PointSet','HistorySet']: - if self.outputDataset: - self.raiseAnError(IOError, "DataSet output is required, but the provided type of DataObject is", output.type) - self.raiseADebug('Dumping output in data object named ' + output.name) - if self.outputMultipleRealizations: - if 'dims' in outputRealization: - dims = outputRealization['dims'] - else: - dims = {} - print(outputRealization.keys()) - output.load(outputRealization['data'], style='dict', dims=dims) - else: - output.addRealization(outputRealization) - elif output.type in ['DataSet']: - self.raiseADebug('Dumping output in DataSet named ' + output.name) - output.load(outputRealization, style='dataset') + self._pp.collectOutput(finishedJob, output, options) diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py new file mode 100644 index 0000000000..190e51714c --- /dev/null +++ b/framework/Models/PostProcessors/PostProcessorInterface.py @@ -0,0 +1,130 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Created on April 6, 2021
+
+@author: wangc
+"""
+
+#External Modules------------------------------------------------------------------------------------
+import copy
+#External Modules End--------------------------------------------------------------------------------
+
+#Internal Modules------------------------------------------------------------------------------------
+from BaseClasses import BaseInterface
+from utils import InputTypes, InputData
+#Internal Modules End--------------------------------------------------------------------------------
+
+class PostProcessorInterface(BaseInterface):
+  """
+    Base class for other postprocessor interfaces (i.e., BasicStatistics, ETImporter).
+  """
+  @classmethod
+  def getInputSpecification(cls):
+    """
+      Method to get a reference to a class that specifies the input data for
+      class cls.
+      @ In, cls, the class for which we are retrieving the specification
+      @ Out, inputSpecification, InputData.ParameterInput, class to use for
+        specifying input of cls.
+    """
+    spec = super().getInputSpecification()
+    return spec
+
+  def __init__(self):
+    """
+      Constructor
+      @ In, None
+      @ Out, None
+    """
+    super().__init__()
+    self.inputCheckInfo = []   # List of tuple, i.e input objects info [('name','type')]
+    self.action = None         # action
+    self.workingDir = ''       # path for working directory
+    self.printTag = 'PostProcessorInterface'
+    self.outputDataset = False # True if the user wants to dump the outputs to dataset
+    self.validDataType = ['PointSet','HistorySet'] # The list of accepted types of DataObject
+    ## Currently, we have used both DataObject.addRealization and DataObject.load to
+    ## collect the PostProcessor returned outputs. DataObject.addRealization is used to
+    ## collect single realization, while DataObject.load is used to collect multiple realizations
+    ## However, the DataObject.load can not be directly used to collect single realization
+    ## One possible solution is all postprocessors return a list of realizations, and we only
+    ## use addRealization method to add the collections into the DataObjects
+    self.outputMultipleRealizations = False
+
+  def _handleInput(self, paramInput):
+    """
+      Function to handle the common parts of the model parameter input.
+      @ In, paramInput, InputData.ParameterInput, the already parsed input. 
+ @ Out, None + """ + supper()._handleInput(self, paramInput) + + def initialize(self, runInfo, inputs, initDict=None): + """ + Method to initialize the PostProcessor + @ In, runInfo, dict, it is the run info from the jobHandler + @ In, inputs, list, it is a list containing whatever is passed with an input role in the step + @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model + """ + super().initialize() + self.inputs = inputs + if 'stepName' in runInfo: + self.workingDir = os.path.join(runInfo['WorkingDir'],runInfo['stepName']) #generate current working dir + else: + self.workingDir = runInfo['WorkingDir'] + + @abc.abstractmethod + def run(self, input): + """ + This method executes the postprocessor action. + @ In, input, object, object containing the data to process. + Should avoid to use (inputToInternal output), and passing xarray directly/dataset + Possible inputs include: dict, xarray.Dataset, pd.DataFrame + @ Out, dict, xarray.Dataset, pd.DataFrame --> I think we can avoid collectoutput in the plugin pp + """ + pass + + def collectOutput(self,finishedJob,output,options=None): + """ + Method that collects the outputs from the "run" method of the PostProcessor + @ In, finishedJob, InternalRunner object, instance of the run just finished + @ In, output, "DataObjects" object, output where the results of the calculation needs to be stored + @ In, options, dict, optional, not used in PostProcessor. + dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) + @ Out, None + """ + if output.type not in self.validDataType: + self.raiseAnError(IOError, 'Output type', str(output.type), 'is not allowed!') + evaluation = finishedJob.getEvaluation() + if isinstance(evaluation, Runners.Error): + self.raiseAnError(RuntimeError, "No available output to collect (run possibly not finished yet)") + outputRealization = evaluation[1] + + if output.type in ['PointSet','HistorySet']: + if self.outputDataset: + self.raiseAnError(IOError, "DataSet output is required, but the provided type of DataObject is", output.type) + self.raiseADebug('Dumping output in data object named ' + output.name) + if self.outputMultipleRealizations: + if 'dims' in outputRealization: + dims = outputRealization['dims'] + else: + dims = {} + print(outputRealization.keys()) + output.load(outputRealization['data'], style='dict', dims=dims) + else: + output.addRealization(outputRealization) + elif output.type in ['DataSet']: + self.raiseADebug('Dumping output in DataSet named ' + output.name) + output.load(outputRealization, style='dataset') From 66ffd6f4b3d7a84abfe7773a1b3f7e604cab2018 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 6 Apr 2021 17:16:58 -0600 Subject: [PATCH 02/51] updates --- .../{PostProcessors => }/PostProcessor.py | 0 framework/Models/PostProcessors/ETImporter.py | 12 ++-- framework/Models/PostProcessors/FTImporter.py | 12 ++-- framework/Models/PostProcessors/Factory.py | 50 ++++++++--------- .../PostProcessors/PostProcessorInterface.py | 3 +- framework/Models/PostProcessors/__init__.py | 55 +++++++++++++++++++ framework/Models/__init__.py | 40 +------------- 7 files changed, 95 insertions(+), 77 deletions(-) rename framework/Models/{PostProcessors => }/PostProcessor.py (100%) create mode 100644 framework/Models/PostProcessors/__init__.py diff --git a/framework/Models/PostProcessors/PostProcessor.py b/framework/Models/PostProcessor.py similarity index 100% rename from 
framework/Models/PostProcessors/PostProcessor.py rename to framework/Models/PostProcessor.py diff --git a/framework/Models/PostProcessors/ETImporter.py b/framework/Models/PostProcessors/ETImporter.py index 654c31ed9b..74a1646f30 100644 --- a/framework/Models/PostProcessors/ETImporter.py +++ b/framework/Models/PostProcessors/ETImporter.py @@ -26,7 +26,7 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes from utils import xmlUtils as xmlU from utils import utils @@ -35,7 +35,7 @@ #Internal Modules End----------------------------------------------------------- -class ETImporter(PostProcessor): +class ETImporter(PostProcessorInterface): """ This is the base class of the PostProcessor that imports Event-Trees (ETs) into RAVEN as a PointSet """ @@ -68,7 +68,7 @@ class cls. @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. """ - inputSpecification = super(ETImporter, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() inputSpecification.addSub(InputData.parameterInputFactory("fileFormat", contentType=InputTypes.StringType)) inputSpecification.addSub(InputData.parameterInputFactory("expand" , contentType=InputTypes.BoolType)) return inputSpecification @@ -81,7 +81,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -89,7 +89,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) fileFormat = paramInput.findFirst('fileFormat') self.fileFormat = fileFormat.value if self.fileFormat not in self.allowedFormats: @@ -118,4 +118,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - PostProcessor.collectOutput(self, finishedJob, output, options=options) + super().collectOutput(self, finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/FTImporter.py b/framework/Models/PostProcessors/FTImporter.py index 187cc7abb5..1bdb1b1820 100644 --- a/framework/Models/PostProcessors/FTImporter.py +++ b/framework/Models/PostProcessors/FTImporter.py @@ -25,7 +25,7 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes from utils import xmlUtils as xmlU from utils import utils @@ -33,7 +33,7 @@ import Files #Internal Modules End----------------------------------------------------------- -class FTImporter(PostProcessor): +class FTImporter(PostProcessorInterface): """ This is the base class of the postprocessor that imports Fault-Trees (FTs) into RAVEN as a PointSet """ @@ -46,7 +46,7 @@ class cls. @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. 
""" - inputSpecification = super(FTImporter, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() fileAllowedFormats = InputTypes.makeEnumType("FTFileFormat", "FTFileFormatType", ["OpenPSA"]) inputSpecification.addSub(InputData.parameterInputFactory("fileFormat", contentType=fileAllowedFormats)) inputSpecification.addSub(InputData.parameterInputFactory("topEventID", contentType=InputTypes.StringType)) @@ -77,7 +77,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -85,7 +85,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) fileFormat = paramInput.findFirst('fileFormat') self.fileFormat = fileFormat.value topEventID = paramInput.findFirst('topEventID') @@ -111,4 +111,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - PostProcessor.collectOutput(self, finishedJob, output, options=options) + super().collectOutput(self, finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py index 123141bdfc..f801a34216 100644 --- a/framework/Models/PostProcessors/Factory.py +++ b/framework/Models/PostProcessors/Factory.py @@ -16,34 +16,34 @@ """ from EntityFactoryBase import EntityFactory -from .PostProcessors import PostProcessorInterface -from .PostProcessors import FTImporter -from .PostProcessors import BasicStatistics -from .PostProcessors import LimitSurface -from .PostProcessors import Metric -from .PostProcessors import ETImporter -from .PostProcessors.DataMining import DataMining -from .PostProcessors import SafestPoint -from .PostProcessors import ValueDuration -from .PostProcessors import SampleSelector -from .PostProcessors import ImportanceRank -from .PostProcessors import CrossValidation -from .PostProcessors import LimitSurfaceIntegral -from .PostProcessors import FastFourierTransform -from .PostProcessors.ExternalPostProcessor import ExternalPostProcessor -from .PostProcessors import InterfacedPostProcessor -from .PostProcessors.TopologicalDecomposition import TopologicalDecomposition -from .PostProcessors import DataClassifier -from .PostProcessors.ComparisonStatisticsModule import ComparisonStatistics -from .PostProcessors import RealizationAverager -from .PostProcessors.ParetoFrontierPostProcessor import ParetoFrontier -from .PostProcessors.MCSimporter import MCSImporter -from .PostProcessors import EconomicRatio +from .PostProcessorInterface import PostProcessorInterface +from .FTImporter import FTImporter +from .BasicStatistics import BasicStatistics +from .LimitSurface import LimitSurface +from .Metric import Metric +from .ETImporter import ETImporter +from .DataMining import DataMining +from .SafestPoint import SafestPoint +from .ValueDuration import ValueDuration +from .SampleSelector import SampleSelector +from .ImportanceRank import ImportanceRank +from .CrossValidation import CrossValidation +from .LimitSurfaceIntegral import LimitSurfaceIntegral +from .FastFourierTransform import FastFourierTransform +from .ExternalPostProcessor 
import ExternalPostProcessor
+from .InterfacedPostProcessor import InterfacedPostProcessor
+from .TopologicalDecomposition import TopologicalDecomposition
+from .DataClassifier import DataClassifier
+from .ComparisonStatisticsModule import ComparisonStatistics
+from .RealizationAverager import RealizationAverager
+from .ParetoFrontierPostProcessor import ParetoFrontier
+from .MCSimporter import MCSImporter
+from .EconomicRatio import EconomicRatio
 ## These utilize the optional prerequisite library PySide, so don't error if they
 ## do not import appropriately.
 try:
-  from .PostProcessors.TopologicalDecomposition import QTopologicalDecomposition
-  from .PostProcessors.DataMining import QDataMining
+  from .TopologicalDecomposition import QTopologicalDecomposition
+  from .DataMining import QDataMining
   renaming = {'QTopologicalDecomposition': 'TopologicalDecomposition',
               'QDataMining': 'DataMining'}
 except ImportError:
diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py
index 190e51714c..791856f760 100644
--- a/framework/Models/PostProcessors/PostProcessorInterface.py
+++ b/framework/Models/PostProcessors/PostProcessorInterface.py
@@ -19,6 +19,7 @@
 #External Modules------------------------------------------------------------------------------------
 import copy
+import abc
 #External Modules End--------------------------------------------------------------------------------
 
 #Internal Modules------------------------------------------------------------------------------------
@@ -69,7 +70,7 @@ def _handleInput(self, paramInput):
     @ In, paramInput, InputData.ParameterInput, the already parsed input.
     @ Out, None
     """
-    supper()._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
 
   def initialize(self, runInfo, inputs, initDict=None):
diff --git a/framework/Models/PostProcessors/__init__.py b/framework/Models/PostProcessors/__init__.py
new file mode 100644
index 0000000000..f2839cc2ba
--- /dev/null
+++ b/framework/Models/PostProcessors/__init__.py
@@ -0,0 +1,55 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+  The PostProcessors module for performing post-processing of data and results in RAVEN. 
+
+  Created on April 6, 2021
+  @author: wangc
+"""
+from .PostProcessorInterface import PostProcessorInterface
+from .FTImporter import FTImporter
+from .BasicStatistics import BasicStatistics
+from .LimitSurface import LimitSurface
+from .Metric import Metric
+from .ETImporter import ETImporter
+from .DataMining import DataMining
+from .SafestPoint import SafestPoint
+from .ValueDuration import ValueDuration
+from .SampleSelector import SampleSelector
+from .ImportanceRank import ImportanceRank
+from .CrossValidation import CrossValidation
+from .LimitSurfaceIntegral import LimitSurfaceIntegral
+from .FastFourierTransform import FastFourierTransform
+from .ExternalPostProcessor import ExternalPostProcessor
+from .InterfacedPostProcessor import InterfacedPostProcessor
+from .TopologicalDecomposition import TopologicalDecomposition
+from .DataClassifier import DataClassifier
+from .ComparisonStatisticsModule import ComparisonStatistics
+from .RealizationAverager import RealizationAverager
+from .ParetoFrontierPostProcessor import ParetoFrontier
+from .MCSimporter import MCSImporter
+from .EconomicRatio import EconomicRatio
+## These utilize the optional prerequisite library PySide, so don't error if they
+## do not import appropriately.
+additionalModules = []
+try:
+  from .TopologicalDecomposition import QTopologicalDecomposition
+  from .DataMining import QDataMining
+  additionalModules.append(QTopologicalDecomposition)
+  additionalModules.append(QDataMining)
+except ImportError:
+  pass
+## [ Add new class here ]
+
+from .Factory import factory
diff --git a/framework/Models/__init__.py b/framework/Models/__init__.py
index a2388aa187..2aeebb66b2 100644
--- a/framework/Models/__init__.py
+++ b/framework/Models/__init__.py
@@ -29,45 +29,7 @@
 from .EnsembleModel import EnsembleModel
 from .HybridModels import HybridModel
 from .HybridModels import LogicalModel
-
-#### PostProcessors
-from .PostProcessors import PostProcessor
-from .PostProcessors import FTImporter
-from .PostProcessors import LimitSurface
-from .PostProcessors import BasicStatistics
-from .PostProcessors import Metric
-from .PostProcessors import ETImporter
-from .PostProcessors.DataMining import DataMining
-from .PostProcessors import SafestPoint
-from .PostProcessors import ValueDuration
-from .PostProcessors import SampleSelector
-from .PostProcessors import ImportanceRank
-from .PostProcessors import CrossValidation
-from .PostProcessors import LimitSurfaceIntegral
-from .PostProcessors import FastFourierTransform
-from .PostProcessors.ExternalPostProcessor import ExternalPostProcessor
-from .PostProcessors import InterfacedPostProcessor
-from .PostProcessors.TopologicalDecomposition import TopologicalDecomposition
-from .PostProcessors import DataClassifier
-from .PostProcessors.ComparisonStatisticsModule import ComparisonStatistics
-from .PostProcessors import RealizationAverager
-from .PostProcessors.ParetoFrontierPostProcessor import ParetoFrontier
-from .PostProcessors.MCSimporter import MCSImporter
-from .PostProcessors import EconomicRatio
-# from .PostProcessors import RavenOutput # deprecated for now
-
-additionalModules = []
-## These utilize the optional prequisite library PySide, so don't error if they
-## do not import appropriately. 
-try: - from .PostProcessors.TopologicalDecomposition import QTopologicalDecomposition - from .PostProcessors.DataMining import QDataMining - additionalModules.append(QTopologicalDecomposition) - additionalModules.append(QDataMining) -except ImportError: - ## User most likely does not have PySide installed and working - pass - +from .PostProcessor import PostProcessor ## [ Add new class here ] From 8e5c79a614fc472b51027cbb2a4ab6e1c518b618 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 6 Apr 2021 21:01:32 -0600 Subject: [PATCH 03/51] update --- framework/BaseClasses/BaseInterface.py | 10 ++++++++++ framework/EntityFactoryBase.py | 5 +++-- framework/Models/Factory.py | 2 +- framework/Models/PostProcessor.py | 15 +++++++-------- .../Models/PostProcessors/BasicStatistics.py | 10 +++++----- .../PostProcessors/ComparisonStatisticsModule.py | 9 ++++----- .../Models/PostProcessors/CrossValidation.py | 10 ++++------ framework/Models/PostProcessors/DataClassifier.py | 12 ++++++------ framework/Models/PostProcessors/DataMining.py | 14 +++++--------- framework/Models/PostProcessors/ETImporter.py | 8 +++----- framework/Models/PostProcessors/ETStructure.py | 3 --- framework/Models/PostProcessors/EconomicRatio.py | 6 ++---- .../PostProcessors/ExternalPostProcessor.py | 10 ++++------ framework/Models/PostProcessors/FTGate.py | 4 ---- framework/Models/PostProcessors/Factory.py | 10 ++-------- .../Models/PostProcessors/FastFourierTransform.py | 8 +++----- framework/Models/PostProcessors/ImportanceRank.py | 10 ++++------ .../PostProcessors/InterfacedPostProcessor.py | 13 +++++-------- framework/Models/PostProcessors/LimitSurface.py | 14 ++++++-------- .../Models/PostProcessors/LimitSurfaceIntegral.py | 8 ++++---- framework/Models/PostProcessors/MCSimporter.py | 12 ++++++------ framework/Models/PostProcessors/Metric.py | 10 ++++------ .../PostProcessors/ParetoFrontierPostProcessor.py | 8 ++++---- .../PostProcessors/PostProcessorInterface.py | 4 +++- .../Models/PostProcessors/RealizationAverager.py | 10 ++++------ framework/Models/PostProcessors/SafestPoint.py | 8 +++----- framework/Models/PostProcessors/SampleSelector.py | 12 +++--------- .../PostProcessors/TopologicalDecomposition.py | 15 +++++---------- framework/Models/PostProcessors/ValueDuration.py | 8 +++----- framework/PostProcessorInterfaceBaseClass.py | 2 -- framework/Samplers/AdaptiveMonteCarlo.py | 4 ++-- framework/Simulation.py | 6 +----- 32 files changed, 116 insertions(+), 164 deletions(-) diff --git a/framework/BaseClasses/BaseInterface.py b/framework/BaseClasses/BaseInterface.py index 8a33b13b27..bd406d4cfd 100644 --- a/framework/BaseClasses/BaseInterface.py +++ b/framework/BaseClasses/BaseInterface.py @@ -94,6 +94,16 @@ def handleInput(self, paramInput, variableGroups=None, globalAttributes=None): self.raiseADebug('------Reading Completed for:') self.printMe() + def initialize(self, *args, **kwargs): + """ + provide a basic reading capability from the xml input file for what is common to all types in the simulation than calls _handleInput + that needs to be overloaded and used as API. 
Each type supported by the simulation should have: name (xml attribute), type (xml tag), + Set up this interface for a particular activity + @ In, args, list, positional arguments + @ In, kwargs, dict, keyword arguments + """ + pass + def _readMoreXML(self,xmlNode): """ Function to read the portion of the xml input that belongs to this specialized class diff --git a/framework/EntityFactoryBase.py b/framework/EntityFactoryBase.py index 4ee2974cd9..5d1cde5bdb 100644 --- a/framework/EntityFactoryBase.py +++ b/framework/EntityFactoryBase.py @@ -34,6 +34,7 @@ def __init__(self, name=None, needsRunInfo=False, returnInputParameter=False): @ In, returnInputParameter, bool, optional, whether this entity can use inputParams (otherwise xml) @ Out, None """ + super().__init__() self.name = None # name of entity, e.g. Sampler self.needsRunInfo = needsRunInfo # whether entity needs run info self.returnInputParameter = returnInputParameter # use xml or inputParams @@ -108,7 +109,7 @@ def returnClass(self, Type): # otherwise, error msg = f'"{self.name}" module does not recognize type "{Type}"; ' msg += f'known types are: {self.knownTypes()}' - caller.raiseAnError(NameError, msg) + self.raiseAnError(NameError, msg) def returnInstance(self, Type, **kwargs): """ @@ -143,4 +144,4 @@ def collectInputSpecs(self, base): # """ # if self._pluginFactory is not None and '.' in typeName: # pluginName, remainder = typeName.split('.', maxsplit=1) - # new = self._pluginFactory.finishLoadPlugin(pluginName) \ No newline at end of file + # new = self._pluginFactory.finishLoadPlugin(pluginName) diff --git a/framework/Models/Factory.py b/framework/Models/Factory.py index ea60541f92..e41913d566 100644 --- a/framework/Models/Factory.py +++ b/framework/Models/Factory.py @@ -29,7 +29,7 @@ from .PostProcessor import PostProcessor factory = EntityFactory('Model', needsRunInfo=True) -factory.registerAllSubtypes(Model, alias=renaming) +factory.registerAllSubtypes(Model) # #here the class methods are called to fill the information about the usage of the classes for className in factory.knownTypes(): diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index a21fac2741..5377efd0ce 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -46,8 +46,8 @@ class cls. """ spec = super().getInputSpecification() validTypes = list(interfaceFactory.knownTypes()) - typeEnum = InputTypes.makeEnumType(validTypes) - for name in typeEnum: + typeEnum = InputTypes.makeEnumType('PostProcessor', 'PostProcessorType', validTypes) + for name in validTypes: pp = interfaceFactory.returnClass(name) subSpec = pp.getInputSpecification() spec.mergeSub(subSpec) @@ -105,13 +105,13 @@ def specializeValidateDict(cls): cls.validateDict['Function' ][0]['required' ] = False cls.validateDict['Function' ][0]['multiplicity'] = 1 - def __init__(self, runInfoDict): + def __init__(self ): """ Constructor - @ In, runInfoDict, dict, the dictionary containing the runInfo (read in the XML input file) + @ In, None @ Out, None """ - super().__init__(self,runInfoDict) + super().__init__() self.inputCheckInfo = [] # List of tuple, i.e input objects info [('name','type')] self.action = None # action self.printTag = 'POSTPROCESSOR MODEL' @@ -123,7 +123,7 @@ def _handleInput(self, paramInput): @ In, paramInput, InputData.ParameterInput, the already parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) reqType = paramInput.parameterValues['subType'] self._pp = interfaceFactory.returnInstance (reqType) self._pp._handleInput(paramInput) @@ -137,7 +137,6 @@ def initialize(self, runInfo, inputs, initDict=None): """ super().initialize(runInfo, inputs, initDict) self._pp.initialize(runInfo, inputs, initDict) - settings = self._pp.getSettings() self.inputCheckInfo = [(inp.name, inp.type) for inp in inputs] def createNewInput(self,myInput,samplerType,**kwargs): @@ -185,7 +184,7 @@ def submit(self,myInput,samplerType,jobHandler,**kwargs): @ Out, None """ kwargs['forceThreads'] = True - super().submit(self,myInput, samplerType, jobHandler,**kwargs) + super().submit(myInput, samplerType, jobHandler,**kwargs) def collectOutput(self, finishedJob, output, options=None): """ diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index 54740efcfc..746a69926d 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -29,14 +29,14 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import utils from utils import InputData, InputTypes from utils import mathUtils import Files #Internal Modules End----------------------------------------------------------- -class BasicStatistics(PostProcessor): +class BasicStatistics(PostProcessorInterface): """ BasicStatistics filter class. It computes all the most popular statistics """ @@ -80,7 +80,7 @@ class cls. specifying input of cls. """ ## This will replace the lines above - inputSpecification = super(BasicStatistics, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() for scalar in cls.scalarVals: scalarSpecification = InputData.parameterInputFactory(scalar, contentType=InputTypes.StringListType) @@ -257,7 +257,7 @@ def initialize(self, runInfo, inputs, initDict): #for backward compatibility, compile the full list of parameters used in Basic Statistics calculations self.parameters['targets'] = list(self.allUsedParams) - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) inputObj = inputs[-1] if type(inputs) == list else inputs if inputObj.type == 'HistorySet': self.dynamic = True @@ -1305,4 +1305,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. 
EnsembleModel) @ Out, None """ - PostProcessor.collectOutput(self, finishedJob, output, options=options) + super().collectOutput(self, finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/ComparisonStatisticsModule.py b/framework/Models/PostProcessors/ComparisonStatisticsModule.py index cf79e3107f..a4e95bfc27 100644 --- a/framework/Models/PostProcessors/ComparisonStatisticsModule.py +++ b/framework/Models/PostProcessors/ComparisonStatisticsModule.py @@ -16,7 +16,6 @@ @author: alfoa """ -from __future__ import division, print_function , unicode_literals, absolute_import #External Modules------------------------------------------------------------------------------------ import numpy as np import math @@ -24,7 +23,7 @@ #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import utils from utils import mathUtils from utils import InputData, InputTypes @@ -258,7 +257,7 @@ def _getPDFandCDFfromData(dataName, data, csv, methodInfo, interpolation, return dataStats, cdfFunc, pdfFunc -class ComparisonStatistics(PostProcessor): +class ComparisonStatistics(PostProcessorInterface): """ ComparisonStatistics is to calculate statistics that compare two different codes or code to experimental data. @@ -355,7 +354,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -363,7 +362,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for outer in paramInput.subparts: if outer.getName() == 'compare': compareGroup = ComparisonStatistics.CompareGroup() diff --git a/framework/Models/PostProcessors/CrossValidation.py b/framework/Models/PostProcessors/CrossValidation.py index 3d5084d090..b4ea563326 100644 --- a/framework/Models/PostProcessors/CrossValidation.py +++ b/framework/Models/PostProcessors/CrossValidation.py @@ -16,7 +16,6 @@ @author: wangc """ -from __future__ import division, print_function , unicode_literals, absolute_import #External Modules------------------------------------------------------------------------------------ import numpy as np import os @@ -25,8 +24,7 @@ import copy #External Modules End-------------------------------------------------------------------------------- -#Internal Modules------------------------------------------------------------------------------------ -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import utils from utils import InputData, InputTypes import Files @@ -34,7 +32,7 @@ import CrossValidations #Internal Modules End-------------------------------------------------------------------------------- -class CrossValidation(PostProcessor): +class CrossValidation(PostProcessorInterface): """ Cross Validation class. 
""" @@ -108,7 +106,7 @@ def initialize(self, runInfo, inputs, initDict=None) : @ In, inputs, list, list of inputs @ In, initDict, dict, dictionary with initialization options """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) for metricIn in self.assemblerDict['Metric']: if metricIn[2] in self.metricsDict.keys(): @@ -124,7 +122,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) self.initializationOptionDict = {} scoreList = ['maximum', 'average', 'median'] cvNode = paramInput.findFirst('SciKitLearn') diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index 450de06042..fc70443be9 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -20,9 +20,9 @@ import numpy as np from utils import InputData, InputTypes, utils -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface -class DataClassifier(PostProcessor): +class DataClassifier(PostProcessorInterface): """ This Post-Processor performs data classification based on given classifier. In order to use this interface post-processor, the users need to provide @@ -40,7 +40,7 @@ class cls. @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. """ - inputSpecification = super(DataClassifier, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() VariableInput = InputData.parameterInputFactory("variable", contentType=InputTypes.StringType) VariableInput.addParam("name", InputTypes.StringType, True) FunctionInput = InputData.parameterInputFactory("Function", contentType=InputTypes.StringType) @@ -77,7 +77,7 @@ def initialize(self, runInfo, inputs, initDict=None): @ In, initDict, dict, optional, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) for key, val in self.mapping.items(): self.funcDict[key] = self.retrieveObjectFromAssemblerDict('Function',val[1]) @@ -87,7 +87,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for child in paramInput.subparts: if child.getName() == 'variable': func = child.findFirst('Function') @@ -222,4 +222,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. 
EnsembleModel) @ Out, None """ - PostProcessor.collectOutput(self, finishedJob, output, options=options) + super().collectOutput(self, finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/DataMining.py b/framework/Models/PostProcessors/DataMining.py index 8cfed8cb54..12a747fd23 100644 --- a/framework/Models/PostProcessors/DataMining.py +++ b/framework/Models/PostProcessors/DataMining.py @@ -15,7 +15,6 @@ Created on July 10, 2013 @author: alfoa """ -from __future__ import division, print_function , unicode_literals, absolute_import #External Modules--------------------------------------------------------------- import numpy as np import copy @@ -24,7 +23,7 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import utils, mathUtils from utils import InputData, InputTypes import Files @@ -32,7 +31,7 @@ import MetricDistributor #Internal Modules End----------------------------------------------------------- -class DataMining(PostProcessor): +class DataMining(PostProcessorInterface): """ DataMiningPostProcessor class. It will apply the specified KDD algorithms in the models to a dataset, each specified algorithm's output can be loaded to @@ -383,7 +382,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) if "SolutionExport" in initDict: self.solutionExport = initDict["SolutionExport"] if "PreProcessor" in self.assemblerDict: @@ -399,7 +398,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) ## By default, we want to name the 'labels' by the name of this ## postprocessor, but that name is not available before processing the XML ## At this point, we have that information @@ -904,10 +903,7 @@ def __runTemporalSciKitLearn(self, Input): __QtAvailable = False if __QtAvailable: - class mQDataMining(type(DataMining), type(qtc.QObject)): - pass - - class QDataMining(DataMining, qtc.QObject, metaclass=mQDataMining): + class QDataMining(DataMining, qtc.QObject): """ DataMining class - Computes a hierarchical clustering from an input point cloud consisting of an arbitrary number of input parameters diff --git a/framework/Models/PostProcessors/ETImporter.py b/framework/Models/PostProcessors/ETImporter.py index 74a1646f30..3de03db685 100644 --- a/framework/Models/PostProcessors/ETImporter.py +++ b/framework/Models/PostProcessors/ETImporter.py @@ -17,8 +17,6 @@ @author: dan maljovec, mandd """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules--------------------------------------------------------------- import numpy as np import xml.etree.ElementTree as ET @@ -81,7 +79,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -89,7 +87,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) fileFormat = paramInput.findFirst('fileFormat') self.fileFormat = fileFormat.value if self.fileFormat not in self.allowedFormats: @@ -118,4 +116,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/ETStructure.py b/framework/Models/PostProcessors/ETStructure.py index b7eb94ce52..ffa2bd64f8 100644 --- a/framework/Models/PostProcessors/ETStructure.py +++ b/framework/Models/PostProcessors/ETStructure.py @@ -16,9 +16,6 @@ @author: mandd """ - -from __future__ import division, print_function , unicode_literals, absolute_import - #Internal Modules--------------------------------------------------------------- import MessageHandler from utils import utils diff --git a/framework/Models/PostProcessors/EconomicRatio.py b/framework/Models/PostProcessors/EconomicRatio.py index 3a5d3370ab..1405d77da5 100644 --- a/framework/Models/PostProcessors/EconomicRatio.py +++ b/framework/Models/PostProcessors/EconomicRatio.py @@ -16,15 +16,13 @@ @author: ZHOUJ2 """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules--------------------------------------------------------------- import numpy as np import xarray as xr #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from .BasicStatistics import BasicStatistics from utils import utils from utils import InputData, InputTypes @@ -147,7 +145,7 @@ def initialize(self, runInfo, inputs, initDict): #for backward compatibility, compile the full list of parameters used in Economic Ratio calculations self.parameters['targets'] = list(self.allUsedParams) - PostProcessor.initialize(self, runInfo, inputs, initDict) + PostProcessorInterface.initialize(self, runInfo, inputs, initDict) inputObj = inputs[-1] if type(inputs) == list else inputs inputMetaKeys = [] outputMetaKeys = [] diff --git a/framework/Models/PostProcessors/ExternalPostProcessor.py b/framework/Models/PostProcessors/ExternalPostProcessor.py index f167d1f3f5..77c3c08a47 100644 --- a/framework/Models/PostProcessors/ExternalPostProcessor.py +++ b/framework/Models/PostProcessors/ExternalPostProcessor.py @@ -16,20 +16,18 @@ @author: alfoa """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules--------------------------------------------------------------- import numpy as np import copy #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes, utils import Files #Internal Modules End----------------------------------------------------------- -class ExternalPostProcessor(PostProcessor): +class ExternalPostProcessor(PostProcessorInterface): """ ExternalPostProcessor class. 
It will apply an arbitrary python function to a dataset and append each specified function's output to the output data @@ -139,7 +137,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) for key in self.assemblerDict.keys(): if 'Function' in key: for val in self.assemblerDict[key]: @@ -162,7 +160,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for child in paramInput.subparts: if child.getName() == 'method': methods = child.value.split(',') diff --git a/framework/Models/PostProcessors/FTGate.py b/framework/Models/PostProcessors/FTGate.py index b697fe424a..7199a7877d 100644 --- a/framework/Models/PostProcessors/FTGate.py +++ b/framework/Models/PostProcessors/FTGate.py @@ -16,9 +16,6 @@ @author: mandd """ - -from __future__ import division, print_function , unicode_literals, absolute_import - #Internal Modules--------------------------------------------------------------- import MessageHandler from utils import utils @@ -274,4 +271,3 @@ def cardinalityGate(argumentValues,l,h): else: outcome = 0 return outcome - diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py index f801a34216..79659e5f6e 100644 --- a/framework/Models/PostProcessors/Factory.py +++ b/framework/Models/PostProcessors/Factory.py @@ -49,13 +49,7 @@ except ImportError: renaming = {} -factory = EntityFactory('PostProcessor', needsRunInfo=True) -factory.registerAllSubtypes(Model, alias=renaming) - -## Here the class methods are called to fill the information about the usage of the classes -for className in factory.knownTypes(): - classType = factory.returnClass(className) - classType.generateValidateDict() - classType.specializeValidateDict() +factory = EntityFactory('PostProcessorInterface', needsRunInfo=True) +factory.registerAllSubtypes(PostProcessorInterface, alias=renaming) factory.registerType('External', ExternalPostProcessor) diff --git a/framework/Models/PostProcessors/FastFourierTransform.py b/framework/Models/PostProcessors/FastFourierTransform.py index 8a9215dac5..6132325ca7 100644 --- a/framework/Models/PostProcessors/FastFourierTransform.py +++ b/framework/Models/PostProcessors/FastFourierTransform.py @@ -16,18 +16,16 @@ @author: talbpaul """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules--------------------------------------------------------------- import numpy as np #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- from utils import InputData, InputTypes -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface #Internal Modules End----------------------------------------------------------- -class FastFourierTransform(PostProcessor): +class FastFourierTransform(PostProcessorInterface): """ Constructs fast-fourier transform data for a history Outputs are "frequency" for each index and "amplitude" for each target @@ -66,7 +64,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. 
@ Out, None
     """
-    PostProcessor._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
     for child in paramInput.subparts:
       tag = child.getName()
       if tag == 'target':
diff --git a/framework/Models/PostProcessors/ImportanceRank.py b/framework/Models/PostProcessors/ImportanceRank.py
index 47045e280d..70f83063d3 100644
--- a/framework/Models/PostProcessors/ImportanceRank.py
+++ b/framework/Models/PostProcessors/ImportanceRank.py
@@ -16,8 +16,6 @@
   @author: alfoa
 """
 
-from __future__ import division, print_function , unicode_literals, absolute_import
-
 #External Modules------------------------------------------------------------------------------------
 import numpy as np
 import os
@@ -26,13 +24,13 @@
 #External Modules End--------------------------------------------------------------------------------
 
 #Internal Modules------------------------------------------------------------------------------------
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 from utils import InputData, InputTypes
 import Files
 from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase
 #Internal Modules End--------------------------------------------------------------------------------
 
-class ImportanceRank(PostProcessor):
+class ImportanceRank(PostProcessorInterface):
   """
     ImportanceRank class. It computes the importance rank for given input parameters
     1. The importance of input parameters can be ranked via their sensitivities (SI: sensitivity index)
@@ -120,7 +118,7 @@ def _handleInput(self, paramInput):
       @ In, paramInput, ParameterInput, the already parsed input.
       @ Out, None
     """
-    PostProcessor._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
     for child in paramInput.subparts:
       if child.getName() == 'what':
         what = child.value.strip()
@@ -208,7 +206,7 @@ def initialize(self, runInfo, inputs, initDict) :
       @ In, inputs, list, list of inputs
       @ In, initDict, dict, dictionary with initialization options
     """
-    PostProcessor.initialize(self, runInfo, inputs, initDict)
+    super().initialize(self, runInfo, inputs, initDict)
     self.mvnDistribution = self.retrieveObjectFromAssemblerDict('mvnDistribution', self.mvnDistribution)
   def inputToInternal(self, currentInp):
     """
diff --git a/framework/Models/PostProcessors/InterfacedPostProcessor.py b/framework/Models/PostProcessors/InterfacedPostProcessor.py
index 029a74dadd..174a10a7b3 100644
--- a/framework/Models/PostProcessors/InterfacedPostProcessor.py
+++ b/framework/Models/PostProcessors/InterfacedPostProcessor.py
@@ -18,11 +18,11 @@
 """
 import importlib
 
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 from utils import InputData
 from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase
 
-class InterfacedPostProcessor(PostProcessor):
+class InterfacedPostProcessor(PostProcessorInterface):
   """
   This class allows one to interface a general-purpose post-processor created ad-hoc by the user.
   While the ExternalPostProcessor is designed for analysis-dependent cases, the InterfacedPostProcessor is designed for more generic cases
@@ -43,10 +43,7 @@ class cls.
       specifying input of cls.
""" ## This will replace the lines above - inputSpecification = super(RavenOutput, cls).getInputSpecification() - - ## TODO: Fill this in with the appropriate tags - + inputSpecification = super().getInputSpecification() return inputSpecification def __init__(self): @@ -71,7 +68,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) inputObj = inputs[-1] if type(inputs) == list else inputs metaKeys = inputObj.getVars('meta') @@ -185,4 +182,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - PostProcessor.collectOutput(self, finishedJob, output, options=options) + super().collectOutput(self, finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/LimitSurface.py b/framework/Models/PostProcessors/LimitSurface.py index b4b35f0542..e17c582597 100644 --- a/framework/Models/PostProcessors/LimitSurface.py +++ b/framework/Models/PostProcessors/LimitSurface.py @@ -16,8 +16,6 @@ @author: alfoa """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules------------------------------------------------------------------------------------ import numpy as np import copy @@ -25,14 +23,14 @@ #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes, utils, mathUtils import LearningGate import GridEntities import Files #Internal Modules End-------------------------------------------------------------------------------- -class LimitSurface(PostProcessor): +class LimitSurface(PostProcessorInterface): """ LimitSurface filter class. It computes the limit surface associated to a dataset """ @@ -47,7 +45,7 @@ class cls. specifying input of cls. """ ## This will replace the lines above - inputSpecification = super(LimitSurface, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() ParametersInput = InputData.parameterInputFactory("parameters", contentType=InputTypes.StringType) inputSpecification.addSub(ParametersInput) @@ -122,7 +120,7 @@ def _initializeLSpp(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) self.gridEntity = GridEntities.factory.returnInstance("MultiGridEntity") self.externalFunction = self.assemblerDict['Function'][0][3] if 'ROM' not in self.assemblerDict.keys(): @@ -234,7 +232,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) self._initializeLSpp(runInfo, inputs, initDict) self._initializeLSppROM(self.inputs[self.indexes]) @@ -304,7 +302,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) initDict = {} for child in paramInput.subparts: initDict[child.getName()] = child.value diff --git a/framework/Models/PostProcessors/LimitSurfaceIntegral.py b/framework/Models/PostProcessors/LimitSurfaceIntegral.py index f68f8fc309..4339ab7c9f 100644 --- a/framework/Models/PostProcessors/LimitSurfaceIntegral.py +++ b/framework/Models/PostProcessors/LimitSurfaceIntegral.py @@ -20,12 +20,12 @@ import xarray import math -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes import LearningGate -class LimitSurfaceIntegral(PostProcessor): +class LimitSurfaceIntegral(PostProcessorInterface): """ This post-processor computes the n-dimensional integral of a Limit Surface """ @@ -40,7 +40,7 @@ class cls. specifying input of cls. """ ## This will replace the lines above - inputSpecification = super(LimitSurfaceIntegral, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() LSIVariableInput = InputData.parameterInputFactory("variable") LSIVariableInput.addParam("name", InputTypes.StringType) @@ -104,7 +104,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for child in paramInput.subparts: varName = None if child.getName() == 'variable': diff --git a/framework/Models/PostProcessors/MCSimporter.py b/framework/Models/PostProcessors/MCSimporter.py index c8e98cec0f..10de7a24de 100644 --- a/framework/Models/PostProcessors/MCSimporter.py +++ b/framework/Models/PostProcessors/MCSimporter.py @@ -28,14 +28,14 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes from utils import xmlUtils as xmlU from utils import utils #Internal Modules End----------------------------------------------------------- -class MCSImporter(PostProcessor): +class MCSImporter(PostProcessorInterface): """ This is the base class of the PostProcessor that imports Minimal Cut Sets (MCSs) into RAVEN as a PointSet """ @@ -65,7 +65,7 @@ def getInputSpecification(cls): @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. """ - inputSpecification = super(MCSImporter, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() inputSpecification.addSub(InputData.parameterInputFactory("expand", contentType=InputTypes.BoolType)) inputSpecification.addSub(InputData.parameterInputFactory("BElistColumn", contentType=InputTypes.StringType)) return inputSpecification @@ -78,7 +78,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, initDict, dict, dictionary with initialization options @ Out, None """ - PostProcessor.initialize(self, runInfo, inputs, initDict) + super().initialize(self, runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -86,7 +86,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None
     """
-    PostProcessor._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
     expand = paramInput.findFirst('expand')
     self.expand = expand.value
 
@@ -156,7 +156,7 @@ def collectOutput(self, finishedJob, output, options=None):
       dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel)
       @ Out, None
     """
-    PostProcessor.collectOutput(self, finishedJob, output, options=options)
+    super().collectOutput(self, finishedJob, output, options=options)
 
 def mcsReader(mcsListFile):
   """
diff --git a/framework/Models/PostProcessors/Metric.py b/framework/Models/PostProcessors/Metric.py
index 41fc087faf..8bf1d32516 100644
--- a/framework/Models/PostProcessors/Metric.py
+++ b/framework/Models/PostProcessors/Metric.py
@@ -16,8 +16,6 @@
   @author: wangc
 """
 
-from __future__ import division, print_function , unicode_literals, absolute_import
-
 #External Modules------------------------------------------------------------------------------------
 import numpy as np
 import os
@@ -32,10 +30,10 @@
 import Files
 import Distributions
 import MetricDistributor
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 #Internal Modules End--------------------------------------------------------------------------------
 
-class Metric(PostProcessor):
+class Metric(PostProcessorInterface):
   """
     Metrics class.
   """
@@ -207,7 +205,7 @@ def initialize(self, runInfo, inputs, initDict) :
       @ In, inputs, list, list of inputs
       @ In, initDict, dict, dictionary with initialization options
     """
-    PostProcessor.initialize(self, runInfo, inputs, initDict)
+    super().initialize(self, runInfo, inputs, initDict)
     for metricIn in self.assemblerDict['Metric']:
       self.metricsDict[metricIn[2]] = metricIn[3]
 
@@ -217,7 +215,7 @@ def _handleInput(self, paramInput):
       @ In, paramInput, ParameterInput, the already parsed input.
       @ Out, None
     """
-    PostProcessor._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
     for child in paramInput.subparts:
       if child.getName() == 'Metric':
         if 'type' not in child.parameterValues.keys() or 'class' not in child.parameterValues.keys():
diff --git a/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py b/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py
index df30acd2cb..1e8934aa99 100644
--- a/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py
+++ b/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py
@@ -22,14 +22,14 @@
 #External Modules End-----------------------------------------------------------
 
 #Internal Modules---------------------------------------------------------------
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 from utils import utils
 from utils import InputData, InputTypes
 from utils import frontUtils
 import Runners
 #Internal Modules End-----------------------------------------------------------
 
-class ParetoFrontier(PostProcessor):
+class ParetoFrontier(PostProcessorInterface):
   """
     This postprocessor selects the points that lie on the Pareto frontier
     The postprocessor acts only on PointSet and returns a subset of such PointSet
@@ -58,7 +58,7 @@ class cls.
       @ Out, inputSpecification, InputData.ParameterInput, class to use for
         specifying input of cls.
""" - inputSpecification = super(ParetoFrontier, cls).getInputSpecification() + inputSpecification = super().getInputSpecification() objDataType = InputTypes.makeEnumType("objective", "objectiveType", ['min','max']) @@ -145,4 +145,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - PostProcessor.collectOutput(self, finishedJob, output, options=options) + super().collectOutput(self, finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py index 791856f760..c789ad7c5a 100644 --- a/framework/Models/PostProcessors/PostProcessorInterface.py +++ b/framework/Models/PostProcessors/PostProcessorInterface.py @@ -20,11 +20,13 @@ #External Modules------------------------------------------------------------------------------------ import copy import abc +import os #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ from BaseClasses import BaseInterface from utils import InputTypes, InputData +import Runners #Internal Modules End-------------------------------------------------------------------------------- class PostProcessorInterface(BaseInterface): @@ -70,7 +72,7 @@ def _handleInput(self, paramInput): @ In, paramInput, InputData.ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) def initialize(self, runInfo, inputs, initDict=None): """ diff --git a/framework/Models/PostProcessors/RealizationAverager.py b/framework/Models/PostProcessors/RealizationAverager.py index fbcd5e8bf1..ff83c9eb9c 100644 --- a/framework/Models/PostProcessors/RealizationAverager.py +++ b/framework/Models/PostProcessors/RealizationAverager.py @@ -16,19 +16,17 @@ @author: talbpw """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules--------------------------------------------------------------- import numpy as np #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import utils from utils import InputData, InputTypes #Internal Modules End----------------------------------------------------------- -class RealizationAverager(PostProcessor): +class RealizationAverager(PostProcessorInterface): """ Does the average of multiple realizations along the RAVEN_sampleID dimension ONLY, leaving the other dimensions as they are. @@ -66,7 +64,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. @ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for child in paramInput.subparts: tag = child.getName() if tag == 'target': @@ -113,4 +111,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. 
EnsembleModel)
       @ Out, None
     """
-    PostProcessor.collectOutput(self, finishedJob, output, options=options)
+    super().collectOutput(self, finishedJob, output, options=options)
diff --git a/framework/Models/PostProcessors/SafestPoint.py b/framework/Models/PostProcessors/SafestPoint.py
index 5dcd4842bc..e4ddfa91f9 100644
--- a/framework/Models/PostProcessors/SafestPoint.py
+++ b/framework/Models/PostProcessors/SafestPoint.py
@@ -16,8 +16,6 @@
   @author: alfoa
 """
 
-from __future__ import division, print_function , unicode_literals, absolute_import
-
 #External Modules------------------------------------------------------------------------------------
 import numpy as np
 import xarray
@@ -26,13 +24,13 @@
 #External Modules End--------------------------------------------------------------------------------
 
 #Internal Modules------------------------------------------------------------------------------------
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 from .BasicStatistics import BasicStatistics
 from utils import InputData, InputTypes
 from utils.RAVENiterators import ravenArrayIterator
 #Internal Modules End--------------------------------------------------------------------------------
 
-class SafestPoint(PostProcessor):
+class SafestPoint(PostProcessorInterface):
   """
     It searches for the probability-weighted safest point inside the space of the system controllable variables
   """
@@ -101,7 +99,7 @@ def _handleInput(self, paramInput):
       @ In, paramInput, ParameterInput, the already parsed input.
       @ Out, None
     """
-    PostProcessor._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
     for child in paramInput.subparts:
       if child.getName() == 'outputName':
         self.outputName = child.value
diff --git a/framework/Models/PostProcessors/SampleSelector.py b/framework/Models/PostProcessors/SampleSelector.py
index b0174cf2f0..aae096d38f 100644
--- a/framework/Models/PostProcessors/SampleSelector.py
+++ b/framework/Models/PostProcessors/SampleSelector.py
@@ -16,19 +16,16 @@
   @author: giovannimaronati
 """
 
-from __future__ import division, print_function , unicode_literals, absolute_import
-
 #External Modules---------------------------------------------------------------
 import numpy as np
 #External Modules End-----------------------------------------------------------
 
-#Internal Modules---------------------------------------------------------------
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 from utils import utils
 from utils import InputData, InputTypes
 #Internal Modules End-----------------------------------------------------------
 
-class SampleSelector(PostProcessor):
+class SampleSelector(PostProcessorInterface):
   """
     This postprocessor selects the row in which the minimum or the maximum
     of a target is found. The postprocessor can act on DataObject, and
@@ -71,7 +68,7 @@ def _handleInput(self, paramInput):
       @ In, paramInput, ParameterInput, the already-parsed input.
@ Out, None
     """
-    PostProcessor._handleInput(self, paramInput)
+    super()._handleInput(self, paramInput)
     for child in paramInput.subparts:
       tag = child.getName()
       if tag == 'target':
@@ -87,9 +84,6 @@ def _handleInput(self, paramInput):
 
     if self.target is None:
       self.raiseAnError(IOError,'Criterion "{}" requires a target be identified!'.format(self.criterion))
-
-
-
   def inputToInternal(self, currentInp):
     """
       Method to convert an input object into the internal format that is
diff --git a/framework/Models/PostProcessors/TopologicalDecomposition.py b/framework/Models/PostProcessors/TopologicalDecomposition.py
index df08bf146f..84871a3233 100644
--- a/framework/Models/PostProcessors/TopologicalDecomposition.py
+++ b/framework/Models/PostProcessors/TopologicalDecomposition.py
@@ -16,8 +16,6 @@
   @author: alfoa
 """
 
-from __future__ import division, print_function, absolute_import
-
 #External Modules------------------------------------------------------------------------------------
 import numpy as np
 import time
@@ -25,13 +23,13 @@
 #External Modules End--------------------------------------------------------------------------------
 
 #Internal Modules------------------------------------------------------------------------------------
-from .PostProcessor import PostProcessor
+from .PostProcessorInterface import PostProcessorInterface
 from utils import InputData, InputTypes
 import Files
 #Internal Modules End-----------------------------------------------------------
 
-class TopologicalDecomposition(PostProcessor):
+class TopologicalDecomposition(PostProcessorInterface):
   """
     TopologicalDecomposition class - Computes an approximated hierarchical Morse-Smale
     decomposition from an input point cloud consisting of an
@@ -48,7 +46,7 @@ class cls.
      specifying input of cls.
    """
    ## This will replace the lines above
-    inputSpecification = super(TopologicalDecomposition, cls).getInputSpecification()
+    inputSpecification = super().getInputSpecification()
 
    TDGraphInput = InputData.parameterInputFactory("graph", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDGraphInput)
@@ -144,7 +142,7 @@ def _handleInput(self, paramInput):
       @ In, paramInput, ParameterInput, the already parsed input.
@ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for child in paramInput.subparts: if child.getName() == "graph": self.graph = child.value.lower() @@ -354,10 +352,7 @@ def run(self, inputIn): __QtAvailable = False if __QtAvailable: - class mQTopologicalDecomposition(type(TopologicalDecomposition), type(qtc.QObject)): - pass - - class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject, metaclass=mQTopologicalDecomposition): + class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject): """ TopologicalDecomposition class - Computes an approximated hierarchical Morse-Smale decomposition from an input point cloud consisting of an diff --git a/framework/Models/PostProcessors/ValueDuration.py b/framework/Models/PostProcessors/ValueDuration.py index e9bd9ba555..fdbbeb73fd 100644 --- a/framework/Models/PostProcessors/ValueDuration.py +++ b/framework/Models/PostProcessors/ValueDuration.py @@ -16,19 +16,17 @@ @author: talbpaul """ -from __future__ import division, print_function , unicode_literals, absolute_import - #External Modules--------------------------------------------------------------- import numpy as np #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessor import PostProcessor +from .PostProcessorInterface import PostProcessorInterface from utils import utils from utils import InputData, InputTypes #Internal Modules End----------------------------------------------------------- -class ValueDuration(PostProcessor): +class ValueDuration(PostProcessorInterface): """ Constructs a load duration curve. x-axis is time spent above a particular variable's value, @@ -70,7 +68,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. 
@ Out, None """ - PostProcessor._handleInput(self, paramInput) + super()._handleInput(self, paramInput) for child in paramInput.subparts: tag = child.getName() if tag == 'target': diff --git a/framework/PostProcessorInterfaceBaseClass.py b/framework/PostProcessorInterfaceBaseClass.py index d625ecb6bc..d58a323abf 100644 --- a/framework/PostProcessorInterfaceBaseClass.py +++ b/framework/PostProcessorInterfaceBaseClass.py @@ -15,8 +15,6 @@ Created on December 1st, 2015 """ -from __future__ import division, print_function, unicode_literals, absolute_import - #External Modules------------------------------------------------------------------------------------ import abc import os diff --git a/framework/Samplers/AdaptiveMonteCarlo.py b/framework/Samplers/AdaptiveMonteCarlo.py index 9fb455017d..2d6d75087f 100644 --- a/framework/Samplers/AdaptiveMonteCarlo.py +++ b/framework/Samplers/AdaptiveMonteCarlo.py @@ -20,7 +20,7 @@ """ import numpy as np -from Models import factory as modelsFactory +from Models.PostProcessors import factory as ppFactory from utils import InputData, InputTypes from .AdaptiveSampler import AdaptiveSampler from .MonteCarlo import MonteCarlo @@ -30,7 +30,7 @@ class AdaptiveMonteCarlo(AdaptiveSampler, MonteCarlo): """ A sampler that will adaptively locate the limit surface of a given problem """ - bS = modelsFactory.returnClass('BasicStatistics') + bS = ppFactory.returnClass('BasicStatistics') statScVals = bS.scalarVals statErVals = bS.steVals usableStats = [] diff --git a/framework/Simulation.py b/framework/Simulation.py index 29bc3db233..a595d146d4 100644 --- a/framework/Simulation.py +++ b/framework/Simulation.py @@ -446,11 +446,7 @@ def XMLread(self,xmlNode,runInfoSkip = set(),xmlFilename=None): #place the instance in the proper dictionary (self.entities[Type]) under his name as key, #the type is the general class (sampler, data, etc) while childChild.tag is the sub type if name not in self.entities[className]: - # postprocessors use subType, so specialize here - if childChild.tag == 'PostProcessor': - entity = self.entityModules[className].factory.returnInstance(childChild.attrib['subType']) - else: - entity = self.entityModules[className].factory.returnInstance(childChild.tag) + entity = self.entityModules[className].factory.returnInstance(childChild.tag) else: self.raiseAnError(IOError,'Redundant naming in the input for class '+className+' and name '+name) entity.applyRunInfo(self.runInfoDict) From 0bd8aa258239399812ea84d0d660c8bacafb27d8 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 6 Apr 2021 21:12:00 -0600 Subject: [PATCH 04/51] update super() calls --- framework/Models/PostProcessors/BasicStatistics.py | 4 ++-- .../Models/PostProcessors/ComparisonStatisticsModule.py | 4 ++-- framework/Models/PostProcessors/CrossValidation.py | 4 ++-- framework/Models/PostProcessors/DataClassifier.py | 6 +++--- framework/Models/PostProcessors/DataMining.py | 4 ++-- framework/Models/PostProcessors/ExternalPostProcessor.py | 4 ++-- framework/Models/PostProcessors/FTImporter.py | 6 +++--- framework/Models/PostProcessors/FastFourierTransform.py | 2 +- framework/Models/PostProcessors/ImportanceRank.py | 4 ++-- .../Models/PostProcessors/InterfacedPostProcessor.py | 4 ++-- framework/Models/PostProcessors/LimitSurface.py | 6 +++--- framework/Models/PostProcessors/LimitSurfaceIntegral.py | 2 +- framework/Models/PostProcessors/MCSimporter.py | 8 +++----- framework/Models/PostProcessors/Metric.py | 4 ++-- .../Models/PostProcessors/ParetoFrontierPostProcessor.py | 2 +- 
framework/Models/PostProcessors/RealizationAverager.py | 4 ++-- framework/Models/PostProcessors/SafestPoint.py | 2 +- framework/Models/PostProcessors/SampleSelector.py | 2 +- .../Models/PostProcessors/TopologicalDecomposition.py | 2 +- framework/Models/PostProcessors/ValueDuration.py | 2 +- framework/PostProcessorInterfaces.py | 6 ------ 21 files changed, 37 insertions(+), 45 deletions(-) diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index 746a69926d..b985ab4a57 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -257,7 +257,7 @@ def initialize(self, runInfo, inputs, initDict): #for backward compatibility, compile the full list of parameters used in Basic Statistics calculations self.parameters['targets'] = list(self.allUsedParams) - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) inputObj = inputs[-1] if type(inputs) == list else inputs if inputObj.type == 'HistorySet': self.dynamic = True @@ -1305,4 +1305,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/ComparisonStatisticsModule.py b/framework/Models/PostProcessors/ComparisonStatisticsModule.py index a4e95bfc27..01815ea46c 100644 --- a/framework/Models/PostProcessors/ComparisonStatisticsModule.py +++ b/framework/Models/PostProcessors/ComparisonStatisticsModule.py @@ -354,7 +354,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -362,7 +362,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for outer in paramInput.subparts: if outer.getName() == 'compare': compareGroup = ComparisonStatistics.CompareGroup() diff --git a/framework/Models/PostProcessors/CrossValidation.py b/framework/Models/PostProcessors/CrossValidation.py index b4ea563326..202a4f28e7 100644 --- a/framework/Models/PostProcessors/CrossValidation.py +++ b/framework/Models/PostProcessors/CrossValidation.py @@ -106,7 +106,7 @@ def initialize(self, runInfo, inputs, initDict=None) : @ In, inputs, list, list of inputs @ In, initDict, dict, dictionary with initialization options """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) for metricIn in self.assemblerDict['Metric']: if metricIn[2] in self.metricsDict.keys(): @@ -122,7 +122,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) self.initializationOptionDict = {} scoreList = ['maximum', 'average', 'median'] cvNode = paramInput.findFirst('SciKitLearn') diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index fc70443be9..6e7427e328 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -77,7 +77,7 @@ def initialize(self, runInfo, inputs, initDict=None): @ In, initDict, dict, optional, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) for key, val in self.mapping.items(): self.funcDict[key] = self.retrieveObjectFromAssemblerDict('Function',val[1]) @@ -87,7 +87,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == 'variable': func = child.findFirst('Function') @@ -222,4 +222,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/DataMining.py b/framework/Models/PostProcessors/DataMining.py index 12a747fd23..1a897e6499 100644 --- a/framework/Models/PostProcessors/DataMining.py +++ b/framework/Models/PostProcessors/DataMining.py @@ -382,7 +382,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) if "SolutionExport" in initDict: self.solutionExport = initDict["SolutionExport"] if "PreProcessor" in self.assemblerDict: @@ -398,7 +398,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) ## By default, we want to name the 'labels' by the name of this ## postprocessor, but that name is not available before processing the XML ## At this point, we have that information diff --git a/framework/Models/PostProcessors/ExternalPostProcessor.py b/framework/Models/PostProcessors/ExternalPostProcessor.py index 77c3c08a47..3fd79c6263 100644 --- a/framework/Models/PostProcessors/ExternalPostProcessor.py +++ b/framework/Models/PostProcessors/ExternalPostProcessor.py @@ -137,7 +137,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) for key in self.assemblerDict.keys(): if 'Function' in key: for val in self.assemblerDict[key]: @@ -160,7 +160,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == 'method': methods = child.value.split(',') diff --git a/framework/Models/PostProcessors/FTImporter.py b/framework/Models/PostProcessors/FTImporter.py index 1bdb1b1820..2e09d7c3c5 100644 --- a/framework/Models/PostProcessors/FTImporter.py +++ b/framework/Models/PostProcessors/FTImporter.py @@ -77,7 +77,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -85,7 +85,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) fileFormat = paramInput.findFirst('fileFormat') self.fileFormat = fileFormat.value topEventID = paramInput.findFirst('topEventID') @@ -111,4 +111,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/FastFourierTransform.py b/framework/Models/PostProcessors/FastFourierTransform.py index 6132325ca7..65d4e95a79 100644 --- a/framework/Models/PostProcessors/FastFourierTransform.py +++ b/framework/Models/PostProcessors/FastFourierTransform.py @@ -64,7 +64,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: tag = child.getName() if tag == 'target': diff --git a/framework/Models/PostProcessors/ImportanceRank.py b/framework/Models/PostProcessors/ImportanceRank.py index 70f83063d3..c83c6fc3ad 100644 --- a/framework/Models/PostProcessors/ImportanceRank.py +++ b/framework/Models/PostProcessors/ImportanceRank.py @@ -118,7 +118,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == 'what': what = child.value.strip() @@ -206,7 +206,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, inputs, list, list of inputs @ In, initDict, dict, dictionary with initialization options """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) self.mvnDistribution = self.retrieveObjectFromAssemblerDict('mvnDistribution', self.mvnDistribution) def inputToInternal(self, currentInp): """ diff --git a/framework/Models/PostProcessors/InterfacedPostProcessor.py b/framework/Models/PostProcessors/InterfacedPostProcessor.py index 174a10a7b3..87bfad327f 100644 --- a/framework/Models/PostProcessors/InterfacedPostProcessor.py +++ b/framework/Models/PostProcessors/InterfacedPostProcessor.py @@ -68,7 +68,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) inputObj = inputs[-1] if type(inputs) == list else inputs metaKeys = inputObj.getVars('meta') @@ -182,4 +182,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/LimitSurface.py b/framework/Models/PostProcessors/LimitSurface.py index e17c582597..6e14bcd0d3 100644 --- a/framework/Models/PostProcessors/LimitSurface.py +++ b/framework/Models/PostProcessors/LimitSurface.py @@ -120,7 +120,7 @@ def _initializeLSpp(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) self.gridEntity = GridEntities.factory.returnInstance("MultiGridEntity") self.externalFunction = self.assemblerDict['Function'][0][3] if 'ROM' not in self.assemblerDict.keys(): @@ -232,7 +232,7 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) self._initializeLSpp(runInfo, inputs, initDict) self._initializeLSppROM(self.inputs[self.indexes]) @@ -302,7 +302,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) initDict = {} for child in paramInput.subparts: initDict[child.getName()] = child.value diff --git a/framework/Models/PostProcessors/LimitSurfaceIntegral.py b/framework/Models/PostProcessors/LimitSurfaceIntegral.py index 4339ab7c9f..c77bd0a1b7 100644 --- a/framework/Models/PostProcessors/LimitSurfaceIntegral.py +++ b/framework/Models/PostProcessors/LimitSurfaceIntegral.py @@ -104,7 +104,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: varName = None if child.getName() == 'variable': diff --git a/framework/Models/PostProcessors/MCSimporter.py b/framework/Models/PostProcessors/MCSimporter.py index 10de7a24de..048d955287 100644 --- a/framework/Models/PostProcessors/MCSimporter.py +++ b/framework/Models/PostProcessors/MCSimporter.py @@ -16,8 +16,6 @@ @author: mandd """ - -from __future__ import division, print_function , unicode_literals, absolute_import import warnings warnings.simplefilter('default', DeprecationWarning) @@ -78,7 +76,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, initDict, dict, dictionary with initialization options @ Out, None """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -86,7 +84,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) expand = paramInput.findFirst('expand') self.expand = expand.value @@ -156,7 +154,7 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) def mcsReader(mcsListFile): """ diff --git a/framework/Models/PostProcessors/Metric.py b/framework/Models/PostProcessors/Metric.py index 8bf1d32516..48abb52ec0 100644 --- a/framework/Models/PostProcessors/Metric.py +++ b/framework/Models/PostProcessors/Metric.py @@ -205,7 +205,7 @@ def initialize(self, runInfo, inputs, initDict) : @ In, inputs, list, list of inputs @ In, initDict, dict, dictionary with initialization options """ - super().initialize(self, runInfo, inputs, initDict) + super().initialize(runInfo, inputs, initDict) for metricIn in self.assemblerDict['Metric']: self.metricsDict[metricIn[2]] = metricIn[3] @@ -215,7 +215,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == 'Metric': if 'type' not in child.parameterValues.keys() or 'class' not in child.parameterValues.keys(): diff --git a/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py b/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py index 1e8934aa99..ac07129f50 100644 --- a/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py +++ b/framework/Models/PostProcessors/ParetoFrontierPostProcessor.py @@ -145,4 +145,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. 
EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/RealizationAverager.py b/framework/Models/PostProcessors/RealizationAverager.py index ff83c9eb9c..1401b4b848 100644 --- a/framework/Models/PostProcessors/RealizationAverager.py +++ b/framework/Models/PostProcessors/RealizationAverager.py @@ -64,7 +64,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: tag = child.getName() if tag == 'target': @@ -111,4 +111,4 @@ def collectOutput(self, finishedJob, output, options=None): dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) @ Out, None """ - super().collectOutput(self, finishedJob, output, options=options) + super().collectOutput(finishedJob, output, options=options) diff --git a/framework/Models/PostProcessors/SafestPoint.py b/framework/Models/PostProcessors/SafestPoint.py index e4ddfa91f9..a1badc3f26 100644 --- a/framework/Models/PostProcessors/SafestPoint.py +++ b/framework/Models/PostProcessors/SafestPoint.py @@ -99,7 +99,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == 'outputName': self.outputName = child.value diff --git a/framework/Models/PostProcessors/SampleSelector.py b/framework/Models/PostProcessors/SampleSelector.py index aae096d38f..e7c6f49973 100644 --- a/framework/Models/PostProcessors/SampleSelector.py +++ b/framework/Models/PostProcessors/SampleSelector.py @@ -68,7 +68,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: tag = child.getName() if tag == 'target': diff --git a/framework/Models/PostProcessors/TopologicalDecomposition.py b/framework/Models/PostProcessors/TopologicalDecomposition.py index 84871a3233..1bcea599ac 100644 --- a/framework/Models/PostProcessors/TopologicalDecomposition.py +++ b/framework/Models/PostProcessors/TopologicalDecomposition.py @@ -142,7 +142,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == "graph": self.graph = child.value.lower() diff --git a/framework/Models/PostProcessors/ValueDuration.py b/framework/Models/PostProcessors/ValueDuration.py index fdbbeb73fd..7f64f590b0 100644 --- a/framework/Models/PostProcessors/ValueDuration.py +++ b/framework/Models/PostProcessors/ValueDuration.py @@ -68,7 +68,7 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already-parsed input. 
@ Out, None """ - super()._handleInput(self, paramInput) + super()._handleInput(paramInput) for child in paramInput.subparts: tag = child.getName() if tag == 'target': diff --git a/framework/PostProcessorInterfaces.py b/framework/PostProcessorInterfaces.py index cabaece938..e7b3f27f4e 100644 --- a/framework/PostProcessorInterfaces.py +++ b/framework/PostProcessorInterfaces.py @@ -15,12 +15,6 @@ Created on December 1, 2015 """ - -#for future compatibility with Python 3-------------------------------------------------------------- -from __future__ import division, print_function, unicode_literals, absolute_import -from __future__ import division, print_function, unicode_literals, absolute_import -#End compatibility block for Python 3---------------------------------------------------------------- - #External Modules------------------------------------------------------------------------------------ import os from glob import glob From 108918e8954b5653dc58d509735514e9ca603dfe Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 8 Apr 2021 10:28:15 -0600 Subject: [PATCH 05/51] update --- framework/BaseClasses/BaseInterface.py | 24 +------------------ framework/Models/PostProcessors/DataMining.py | 5 +++- .../TopologicalDecomposition.py | 5 +++- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/framework/BaseClasses/BaseInterface.py b/framework/BaseClasses/BaseInterface.py index f2babeebf7..82018dacfd 100644 --- a/framework/BaseClasses/BaseInterface.py +++ b/framework/BaseClasses/BaseInterface.py @@ -97,7 +97,7 @@ def provideExpectedMetaKeys(self): """ return self.metadataKeys, self.metadataParams - def addMetaKeys(self, args, params=None): + def addMetaKeys(self,args, params={}): """ Adds keywords to a list of expected metadata keys. @ In, args, list(str), keywords to register @@ -105,8 +105,6 @@ def addMetaKeys(self, args, params=None): values of the dictionary are lists of the corresponding indexes/coordinates of given variable @ Out, None """ - if params is None: - params = {} if any(not mathUtils.isAString(a) for a in args): self.raiseAnError('Arguments to addMetaKeys were not all strings:',args) self.metadataKeys = self.metadataKeys.union(set(args)) @@ -228,26 +226,6 @@ def printMe(self): for key in tempDict.keys(): self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key]))) - def provideExpectedMetaKeys(self): - """ - Provides the registered list of metadata keys for this entity. - @ In, None - @ Out, meta, tuple, (set(str),dict), expected keys (empty if none) and indexes/dimensions corresponding to expected keys - """ - return self.metadataKeys, self.metadataParams - - def addMetaKeys(self,args, params={}): - """ - Adds keywords to a list of expected metadata keys. 
- @ In, args, list(str), keywords to register - @ In, params, dict, optional, {key:[indexes]}, keys of the dictionary are the variable names, - values of the dictionary are lists of the corresponding indexes/coordinates of given variable - @ Out, None - """ - if any(not mathUtils.isAString(a) for a in args): - self.raiseAnError('Arguments to addMetaKeys were not all strings:',args) - self.metadataKeys = self.metadataKeys.union(set(args)) - self.metadataParams.update(params) def _formatSolutionExportVariableNames(self, acceptable): """ diff --git a/framework/Models/PostProcessors/DataMining.py b/framework/Models/PostProcessors/DataMining.py index 1a897e6499..5aceb8e446 100644 --- a/framework/Models/PostProcessors/DataMining.py +++ b/framework/Models/PostProcessors/DataMining.py @@ -903,7 +903,10 @@ def __runTemporalSciKitLearn(self, Input): __QtAvailable = False if __QtAvailable: - class QDataMining(DataMining, qtc.QObject): + class mQDataMining(type(DataMining), type(qtc.QObject)): + pass + + class QDataMining(DataMining, qtc.QObject, metaclass=mQDataMining): """ DataMining class - Computes a hierarchical clustering from an input point cloud consisting of an arbitrary number of input parameters diff --git a/framework/Models/PostProcessors/TopologicalDecomposition.py b/framework/Models/PostProcessors/TopologicalDecomposition.py index 1bcea599ac..1e6d19ca76 100644 --- a/framework/Models/PostProcessors/TopologicalDecomposition.py +++ b/framework/Models/PostProcessors/TopologicalDecomposition.py @@ -352,7 +352,10 @@ def run(self, inputIn): __QtAvailable = False if __QtAvailable: - class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject): + class mQTopologicalDecomposition(type(TopologicalDecomposition), type(qtc.QObject)): + pass + + class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject, metaclass=mQTopologicalDecomposition): """ TopologicalDecomposition class - Computes an approximated hierarchical Morse-Smale decomposition from an input point cloud consisting of an From ad53cc8328c8139106d807a1d14355db4ddeb2bb Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 9 Apr 2021 10:25:01 -0600 Subject: [PATCH 06/51] enable Assembler object for post processor --- framework/BaseClasses/BaseInterface.py | 5 ++- framework/Models/PostProcessor.py | 38 ++++++++++++++++++- .../Models/PostProcessors/CrossValidation.py | 2 +- .../PostProcessors/PostProcessorInterface.py | 3 +- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/framework/BaseClasses/BaseInterface.py b/framework/BaseClasses/BaseInterface.py index 82018dacfd..afbd193c1b 100644 --- a/framework/BaseClasses/BaseInterface.py +++ b/framework/BaseClasses/BaseInterface.py @@ -20,8 +20,9 @@ from utils import mathUtils from utils.utils import metaclass_insert from BaseClasses import BaseType +from BaseClasses import Assembler -class BaseInterface(metaclass_insert(ABCMeta, BaseType)): +class BaseInterface(metaclass_insert(ABCMeta, Assembler, BaseType)): """ Archetype for "interface" classes, including implementations/strategies/algorithms to execute the intention of BaseEntity types. 
For example, SupervisedLearning Engines are an Interface
@@ -156,7 +157,7 @@ def _readMoreXML(self,xmlNode):
       @ In, xmlNode, xml.etree.ElementTree.Element, XML element node that represents the portion of the input that belongs to this class
       @ Out, None
     """
-    pass
+    super()._readMoreXML(xmlNode)
 
   def _handleInput(self, paramInput):
     """
diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py
index 5377efd0ce..27bc84df0f 100644
--- a/framework/Models/PostProcessor.py
+++ b/framework/Models/PostProcessor.py
@@ -117,6 +117,16 @@ def __init__(self ):
     self.printTag = 'POSTPROCESSOR MODEL'
     self._pp = None
 
+  def _readMoreXML(self,xmlNode):
+    """
+      Function to read the portion of the xml input that belongs to this specialized class
+      and initialize some stuff based on the inputs got
+      @ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
+      @ Out, None
+    """
+    super()._readMoreXML(xmlNode)
+    self._pp._readMoreXML(xmlNode)
+
   def _handleInput(self, paramInput):
     """
       Function to handle the common parts of the model parameter input.
@@ -125,8 +135,32 @@ def _handleInput(self, paramInput):
     """
     super()._handleInput(paramInput)
     reqType = paramInput.parameterValues['subType']
-    self._pp = interfaceFactory.returnInstance (reqType)
-    self._pp._handleInput(paramInput)
+    self._pp = interfaceFactory.returnInstance(reqType)
+    # self._pp._handleInput(paramInput)
+
+  def whatDoINeed(self):
+    """
+      This method is used mainly by the Simulation class at the Step construction stage.
+      It is used for inquiring the class, which is implementing the method, about the kind of objects the class needs to
+      be initialized.
+      @ In, None
+      @ Out, needDict, dict, dictionary of objects needed (class:tuple(object type{if None, Simulation does not check the type}, object name))
+    """
+    needDict = super().whatDoINeed()
+    needDictInterface = self._pp.whatDoINeed()
+    needDict.update(needDictInterface)
+    return needDict
+
+  def generateAssembler(self, initDict):
+    """
+      This method is used mainly by the Simulation class at the Step construction stage.
+      It is used for sending to the instantiated class, which is implementing the method, the objects that have been requested through "whatDoINeed" method
+      It is an abstract method -> It must be implemented in the derived class!
+      @ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases):{specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'})
+      @ Out, None
+    """
+    super().generateAssembler(initDict)
+    self._pp.generateAssembler(initDict)
 
   def initialize(self, runInfo, inputs, initDict=None):
     """
diff --git a/framework/Models/PostProcessors/CrossValidation.py b/framework/Models/PostProcessors/CrossValidation.py
index 202a4f28e7..d370c44266 100644
--- a/framework/Models/PostProcessors/CrossValidation.py
+++ b/framework/Models/PostProcessors/CrossValidation.py
@@ -327,7 +327,7 @@ def run(self, inputIn):
         scoreDict[varName] = np.atleast_1d(np.mean(np.atleast_1d(metricValues)))
     return scoreDict
 
-  def collectOutput(self,finishedJob, output):
+  def collectOutput(self,finishedJob, output, options=None):
     """
      Function to place all of the computed data into the output object, i.e.
Files
      @ In, finishedJob, object, JobHandler object that is in charge of running this postprocessor
diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py
index c789ad7c5a..d6113cbd15 100644
--- a/framework/Models/PostProcessors/PostProcessorInterface.py
+++ b/framework/Models/PostProcessors/PostProcessorInterface.py
@@ -43,6 +43,7 @@ class cls.
      specifying input of cls.
    """
    spec = super().getInputSpecification()
+    spec.addParam("subType", InputTypes.StringType, True)
    return spec
 
  def __init__(self):
From 1339b1d063b8baf233bef7a2abc42e22973c3d3a Mon Sep 17 00:00:00 2001
From: "Wang, Congjian"
Date: Fri, 9 Apr 2021 10:44:48 -0600
Subject: [PATCH 07/51] update collectoutput

---
 framework/Models/PostProcessor.py                         | 2 +-
 framework/Models/PostProcessors/Metric.py                 | 2 +-
 framework/Models/PostProcessors/PostProcessorInterface.py | 2 ++
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py
index 27bc84df0f..b78d9d6788 100644
--- a/framework/Models/PostProcessor.py
+++ b/framework/Models/PostProcessor.py
@@ -234,4 +234,4 @@ def collectOutput(self, finishedJob, output, options=None):
     self.raiseAnError(IOError, 'DataObject',output.name,'is used as both input and output of', \
                       self.interface.printTag, 'This is not allowed! Please use different DataObject as output')
 
-    self._pp.collectOutput(finishedJob, output, options)
+    self._pp.collectOutput(finishedJob, output)
diff --git a/framework/Models/PostProcessors/Metric.py b/framework/Models/PostProcessors/Metric.py
index 48abb52ec0..5067707c2f 100644
--- a/framework/Models/PostProcessors/Metric.py
+++ b/framework/Models/PostProcessors/Metric.py
@@ -240,7 +240,7 @@ def _handleInput(self, paramInput):
     elif len(self.features) != len(self.targets):
       self.raiseAnError(IOError, 'The number of variables found in XML node "Features" is not equal the number of variables found in XML node "Targets"')
 
-  def collectOutput(self,finishedJob, output):
+  def collectOutput(self, finishedJob, output, options=None):
     """
       Function to place all of the computed data into the output object, (Files or DataObjects)
       @ In, finishedJob, object, JobHandler object that is in charge of running this postprocessor
diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py
index d6113cbd15..96ab7ff35f 100644
--- a/framework/Models/PostProcessors/PostProcessorInterface.py
+++ b/framework/Models/PostProcessors/PostProcessorInterface.py
@@ -43,6 +43,8 @@ class cls.
      specifying input of cls.
""" spec = super().getInputSpecification() + spec.setStrictMode(False) + # spec.strictNode = False spec.addParam("subType", InputTypes.StringType, True) return spec From c2186c4c02a214d500092b7cc0c5d9f269de7f2d Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 9 Apr 2021 11:16:52 -0600 Subject: [PATCH 08/51] update --- framework/Models/PostProcessor.py | 31 +++++++++++++++---- .../PostProcessors/LimitSurfaceIntegral.py | 4 +-- .../PostProcessors/PostProcessorInterface.py | 1 - 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index b78d9d6788..f969369d33 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -45,6 +45,7 @@ class cls. specifying input of cls. """ spec = super().getInputSpecification() + spec.setStrictMode(False) validTypes = list(interfaceFactory.knownTypes()) typeEnum = InputTypes.makeEnumType('PostProcessor', 'PostProcessorType', validTypes) for name in validTypes: @@ -124,7 +125,7 @@ def _readMoreXML(self,xmlNode): @ In, xmlNode, xml.etree.ElementTree.Element, Xml element node @ Out, None """ - super()._readMoreXML(xmlNode) + Model._readMoreXML(self, xmlNode) self._pp._readMoreXML(xmlNode) def _handleInput(self, paramInput): @@ -136,7 +137,6 @@ def _handleInput(self, paramInput): super()._handleInput(paramInput) reqType = paramInput.parameterValues['subType'] self._pp = interfaceFactory.returnInstance(reqType) - # self._pp._handleInput(paramInput) def whatDoINeed(self): """ @@ -144,7 +144,8 @@ def whatDoINeed(self): It is used for inquiring the class, which is implementing the method, about the kind of objects the class needs to be initialize. @ In, None - @ Out, needDict, dict, dictionary of objects needed (class:tuple(object type{if None, Simulation does not check the type}, object name)) + @ Out, needDict, dict, dictionary of objects needed (class:tuple(object type{if None, + Simulation does not check the type}, object name)) """ needDict = super().whatDoINeed() needDictInterface = self._pp.whatDoINeed() @@ -154,9 +155,10 @@ def whatDoINeed(self): def generateAssembler(self, initDict): """ This method is used mainly by the Simulation class at the Step construction stage. - It is used for sending to the instanciated class, which is implementing the method, the objects that have been requested through "whatDoINeed" method - It is an abstract method -> It must be implemented in the derived class! - @ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases):{specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'}) + It is used for sending to the instanciated class, which is implementing the method, + the objects that have been requested through "whatDoINeed" method + @ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases): + {specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'}) @ Out, None """ super().generateAssembler(initDict) @@ -235,3 +237,20 @@ def collectOutput(self, finishedJob, output, options=None): self.interface.printTag, 'This is not allowed! Please use different DataObjet as output') self._pp.collectOutput(finishedJob, output) + + def provideExpectedMetaKeys(self): + """ + Overrides the base class method to assure child postprocessor is also polled for its keys. 
+ @ In, None + @ Out, meta, tuple, (set(str),dict), expected keys (empty if none) and the indexes related to expected keys + """ + # get keys as per base class + metaKeys,metaParams = super().provideExpectedMetaKeys() + # add postprocessor keys + try: + keys, params = self._pp.provideExpectedMetaKeys() + metaKeys = metaKeys.union(keys) + metaParams.update(params) + except AttributeError: + pass # either "interface" has no method for returning meta keys, or "interface" is not established yet. + return metaKeys, metaParams diff --git a/framework/Models/PostProcessors/LimitSurfaceIntegral.py b/framework/Models/PostProcessors/LimitSurfaceIntegral.py index c77bd0a1b7..cc939640fb 100644 --- a/framework/Models/PostProcessors/LimitSurfaceIntegral.py +++ b/framework/Models/PostProcessors/LimitSurfaceIntegral.py @@ -81,7 +81,7 @@ def __init__(self): @ Out, None """ super().__init__() - from Models import factory as modelsFactory # delay import to allow definition + from Models.PostProcessors import factory as ppFactory # delay import to allow definition self.variableDist = {} # dictionary created upon the .xml input file reading. It stores the distributions for each variable. self.target = None # target that defines the f(x1,x2,...,xn) self.tolerance = 0.0001 # integration tolerance @@ -93,7 +93,7 @@ def __init__(self): self.functionS = None # evaluation classifier for the integration self.errorModel = None # classifier used for the error estimation self.computationPrefix = None # output prefix for the storage of the probability and, if requested, bounding error - self.stat = modelsFactory.returnInstance('BasicStatistics') # instantiation of the 'BasicStatistics' processor, which is used to compute the pb given montecarlo evaluations + self.stat = ppFactory.returnInstance('BasicStatistics') # instantiation of the 'BasicStatistics' processor, which is used to compute the pb given montecarlo evaluations self.stat.what = ['expectedValue'] # expected value calculation self.addAssemblerObject('distribution', InputData.Quantity.zero_to_infinity) # distributions are optional self.printTag = 'POSTPROCESSOR INTEGRAL' # print tag diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py index 96ab7ff35f..0e1d763e3e 100644 --- a/framework/Models/PostProcessors/PostProcessorInterface.py +++ b/framework/Models/PostProcessors/PostProcessorInterface.py @@ -44,7 +44,6 @@ class cls. """ spec = super().getInputSpecification() spec.setStrictMode(False) - # spec.strictNode = False spec.addParam("subType", InputTypes.StringType, True) return spec From d6a830fc9d4c9184d9bd54963a229f65b5eb751e Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 9 Apr 2021 12:06:40 -0600 Subject: [PATCH 09/51] use PP factory instead of Model factory --- framework/Models/PostProcessor.py | 11 +---------- framework/Models/PostProcessors/SafestPoint.py | 2 +- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index f969369d33..66659cc7c4 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -126,18 +126,9 @@ def _readMoreXML(self,xmlNode): @ Out, None """ Model._readMoreXML(self, xmlNode) + self._pp = interfaceFactory.returnInstance(self.subType) self._pp._readMoreXML(xmlNode) - def _handleInput(self, paramInput): - """ - Function to handle the common parts of the model parameter input. 
- @ In, paramInput, InputData.ParameterInput, the already parsed input. - @ Out, None - """ - super()._handleInput(paramInput) - reqType = paramInput.parameterValues['subType'] - self._pp = interfaceFactory.returnInstance(reqType) - def whatDoINeed(self): """ This method is used mainly by the Simulation class at the Step construction stage. diff --git a/framework/Models/PostProcessors/SafestPoint.py b/framework/Models/PostProcessors/SafestPoint.py index a1badc3f26..6ef256707e 100644 --- a/framework/Models/PostProcessors/SafestPoint.py +++ b/framework/Models/PostProcessors/SafestPoint.py @@ -78,7 +78,7 @@ def __init__(self): """ super().__init__() # delay loading for import order - from Models import factory + from Models.PostProcessors import factory self.controllableDist = {} # dictionary created upon the .xml input file reading. It stores the distributions for each controllable variable. self.nonControllableDist = {} # dictionary created upon the .xml input file reading. It stores the distributions for each non-controllable variable. self.controllableGrid = {} # dictionary created upon the .xml input file reading. It stores the grid type ('value' or 'CDF'), the number of steps and the step length for each controllale variable. From cae97e4e29b530104e3c40b0934c47681aa338dd Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 9 Apr 2021 12:10:51 -0600 Subject: [PATCH 10/51] clean up --- framework/BaseClasses/BaseInterface.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/framework/BaseClasses/BaseInterface.py b/framework/BaseClasses/BaseInterface.py index afbd193c1b..8194afb467 100644 --- a/framework/BaseClasses/BaseInterface.py +++ b/framework/BaseClasses/BaseInterface.py @@ -140,16 +140,6 @@ def readXML(self, xmlNode, variableGroups=None, globalAttributes=None): self.raiseADebug('------Reading Completed for:') self.printMe() - def initialize(self, *args, **kwargs): - """ - provide a basic reading capability from the xml input file for what is common to all types in the simulation than calls _handleInput - that needs to be overloaded and used as API. Each type supported by the simulation should have: name (xml attribute), type (xml tag), - Set up this interface for a particular activity - @ In, args, list, positional arguments - @ In, kwargs, dict, keyword arguments - """ - pass - def _readMoreXML(self,xmlNode): """ Function to read the portion of the xml input that belongs to this specialized class From cf594ba9eb8a67ca7d22f35ae276e7194be92e78 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 9 Apr 2021 14:00:49 -0600 Subject: [PATCH 11/51] fix the way to retrieve the interface properties --- framework/Models/PostProcessor.py | 1 - framework/Models/PostProcessors/PostProcessorInterface.py | 1 - framework/Models/ROM.py | 6 +++--- framework/Samplers/AdaptiveMonteCarlo.py | 2 +- framework/Samplers/LimitSurfaceSearch.py | 4 ++-- framework/SupervisedLearning/ROMCollection.py | 2 +- 6 files changed, 7 insertions(+), 9 deletions(-) diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index 66659cc7c4..f0c40b0746 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -45,7 +45,6 @@ class cls. specifying input of cls. 
""" spec = super().getInputSpecification() - spec.setStrictMode(False) validTypes = list(interfaceFactory.knownTypes()) typeEnum = InputTypes.makeEnumType('PostProcessor', 'PostProcessorType', validTypes) for name in validTypes: diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py index 0e1d763e3e..d6113cbd15 100644 --- a/framework/Models/PostProcessors/PostProcessorInterface.py +++ b/framework/Models/PostProcessors/PostProcessorInterface.py @@ -43,7 +43,6 @@ class cls. specifying input of cls. """ spec = super().getInputSpecification() - spec.setStrictMode(False) spec.addParam("subType", InputTypes.StringType, True) return spec diff --git a/framework/Models/ROM.py b/framework/Models/ROM.py index ddca2abdcf..a5a1f1bb7f 100644 --- a/framework/Models/ROM.py +++ b/framework/Models/ROM.py @@ -1546,10 +1546,10 @@ def _crossValidationScore(self, trainingSet): # reset the ROM before perform cross validation cvMetrics = {} self.reset() - outputMetrics = self.cvInstance.run([self, trainingSet]) + outputMetrics = self.cvInstance._pp.run([self, trainingSet]) exploredTargets = [] for cvKey, metricValues in outputMetrics.items(): - info = self.cvInstance._returnCharacteristicsOfCvGivenOutputName(cvKey) + info = self.cvInstance._pp._returnCharacteristicsOfCvGivenOutputName(cvKey) if info['targetName'] in exploredTargets: self.raiseAnError(IOError, "Multiple metrics are used in cross validation '", self.cvInstance.name, "' for ROM '", rom.name, "'!") exploredTargets.append(info['targetName']) @@ -1563,7 +1563,7 @@ def _checkCV(self, trainingSize): @ Out, None """ useCV = True - initDict = self.cvInstance.initializationOptionDict + initDict = self.cvInstance._pp.initializationOptionDict if 'SciKitLearn' in initDict.keys() and 'n_splits' in initDict['SciKitLearn'].keys(): if trainingSize < utils.intConversion(initDict['SciKitLearn']['n_splits']): useCV = False diff --git a/framework/Samplers/AdaptiveMonteCarlo.py b/framework/Samplers/AdaptiveMonteCarlo.py index 2d6d75087f..c702b03bd5 100644 --- a/framework/Samplers/AdaptiveMonteCarlo.py +++ b/framework/Samplers/AdaptiveMonteCarlo.py @@ -161,7 +161,7 @@ def localInitialize(self, solutionExport=None): @ Out, None """ self.converged = False - self.basicStatPP = modelsFactory.returnInstance('BasicStatistics') + self.basicStatPP = ppFactory.returnInstance('BasicStatistics') # check if solutionExport is actually a "DataObjects" type "PointSet" if self._solutionExport.type != "PointSet": self.raiseAnError(IOError,'solutionExport type is not a PointSet. 
Got '+ self._solutionExport.type +'!') diff --git a/framework/Samplers/LimitSurfaceSearch.py b/framework/Samplers/LimitSurfaceSearch.py index daba0650e0..7b1a9a7077 100644 --- a/framework/Samplers/LimitSurfaceSearch.py +++ b/framework/Samplers/LimitSurfaceSearch.py @@ -26,7 +26,7 @@ from scipy import spatial from math import ceil -from Models import factory as modelsFactory +from Models.PostProcessors import factory as ppFactory import Distributions from AMSC_Object import AMSC_Object from utils import randomUtils @@ -355,7 +355,7 @@ def localInitialize(self,solutionExport=None): @ Out, None """ self.converged = False - self.limitSurfacePP = modelsFactory.returnInstance('LimitSurface') + self.limitSurfacePP = ppFactory.returnInstance('LimitSurface') if 'Function' in self.assemblerDict.keys(): self.goalFunction = self.assemblerDict['Function'][0][3] # if 'TargetEvaluation' in self.assemblerDict.keys(): diff --git a/framework/SupervisedLearning/ROMCollection.py b/framework/SupervisedLearning/ROMCollection.py index b12bd07f49..f1beeea01e 100644 --- a/framework/SupervisedLearning/ROMCollection.py +++ b/framework/SupervisedLearning/ROMCollection.py @@ -676,7 +676,7 @@ def readAssembledObjects(self): classifier = self._assembledObjects.get('Classifier', [[None]*4])[0][3] if classifier is not None: # Try using the pp directly, not just the uSVE - classifier = classifier.unSupervisedEngine + classifier = classifier._pp.unSupervisedEngine else: self.raiseAnError(IOError, 'Clustering was requested, but no provided!') self._divisionClassifier = classifier From a91e7a1f1a30abe79727acef7d124c2f1316778c Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Mon, 12 Apr 2021 11:57:41 -0600 Subject: [PATCH 12/51] add docstr for classes --- framework/Models/PostProcessors/DataMining.py | 3 +++ framework/Models/PostProcessors/TopologicalDecomposition.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/framework/Models/PostProcessors/DataMining.py b/framework/Models/PostProcessors/DataMining.py index 5aceb8e446..6cf5dff403 100644 --- a/framework/Models/PostProcessors/DataMining.py +++ b/framework/Models/PostProcessors/DataMining.py @@ -904,6 +904,9 @@ def __runTemporalSciKitLearn(self, Input): if __QtAvailable: class mQDataMining(type(DataMining), type(qtc.QObject)): + """ + Class used to solve the metaclass conflict + """ pass class QDataMining(DataMining, qtc.QObject, metaclass=mQDataMining): diff --git a/framework/Models/PostProcessors/TopologicalDecomposition.py b/framework/Models/PostProcessors/TopologicalDecomposition.py index 1e6d19ca76..b4961143fa 100644 --- a/framework/Models/PostProcessors/TopologicalDecomposition.py +++ b/framework/Models/PostProcessors/TopologicalDecomposition.py @@ -353,6 +353,9 @@ def run(self, inputIn): if __QtAvailable: class mQTopologicalDecomposition(type(TopologicalDecomposition), type(qtc.QObject)): + """ + Class used to solve the metaclass conflict + """ pass class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject, metaclass=mQTopologicalDecomposition): From 7458d53a4ee58223a798291a8f561f468a89fd6e Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 13 Apr 2021 08:51:02 -0600 Subject: [PATCH 13/51] resolve comments --- framework/BaseClasses/BaseInterface.py | 4 +++- framework/Models/PostProcessors/PostProcessorInterface.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/framework/BaseClasses/BaseInterface.py b/framework/BaseClasses/BaseInterface.py index 8194afb467..4c235b3654 100644 --- 
a/framework/BaseClasses/BaseInterface.py +++ b/framework/BaseClasses/BaseInterface.py @@ -98,7 +98,7 @@ def provideExpectedMetaKeys(self): """ return self.metadataKeys, self.metadataParams - def addMetaKeys(self,args, params={}): + def addMetaKeys(self,args, params=None): """ Adds keywords to a list of expected metadata keys. @ In, args, list(str), keywords to register @@ -106,6 +106,8 @@ def addMetaKeys(self,args, params={}): values of the dictionary are lists of the corresponding indexes/coordinates of given variable @ Out, None """ + if params is None: + params = {} if any(not mathUtils.isAString(a) for a in args): self.raiseAnError('Arguments to addMetaKeys were not all strings:',args) self.metadataKeys = self.metadataKeys.union(set(args)) diff --git a/framework/Models/PostProcessors/PostProcessorInterface.py b/framework/Models/PostProcessors/PostProcessorInterface.py index d6113cbd15..540c31dc28 100644 --- a/framework/Models/PostProcessors/PostProcessorInterface.py +++ b/framework/Models/PostProcessors/PostProcessorInterface.py @@ -81,6 +81,7 @@ def initialize(self, runInfo, inputs, initDict=None): @ In, runInfo, dict, it is the run info from the jobHandler @ In, inputs, list, it is a list containing whatever is passed with an input role in the step @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model + @ Out, None """ super().initialize() self.inputs = inputs @@ -98,7 +99,6 @@ def run(self, input): Possible inputs include: dict, xarray.Dataset, pd.DataFrame @ Out, dict, xarray.Dataset, pd.DataFrame --> I think we can avoid collectoutput in the plugin pp """ - pass def collectOutput(self,finishedJob, output, options=None): """ From dd437f28992dc7b6584e9c2af5add6460f8b6d38 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 13 Apr 2021 13:26:31 -0600 Subject: [PATCH 14/51] postprocessor plugin API --- framework/Models/PostProcessors/ETImporter.py | 39 +----- .../PostProcessorPluginBase.py | 127 ++++++++++++++++++ 2 files changed, 130 insertions(+), 36 deletions(-) create mode 100644 framework/PluginsBaseClasses/PostProcessorPluginBase.py diff --git a/framework/Models/PostProcessors/ETImporter.py b/framework/Models/PostProcessors/ETImporter.py index 3de03db685..4e5a81f22b 100644 --- a/framework/Models/PostProcessors/ETImporter.py +++ b/framework/Models/PostProcessors/ETImporter.py @@ -16,24 +16,13 @@ @author: dan maljovec, mandd """ - -#External Modules--------------------------------------------------------------- -import numpy as np -import xml.etree.ElementTree as ET -import copy -#External Modules End----------------------------------------------------------- - #Internal Modules--------------------------------------------------------------- -from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes -from utils import xmlUtils as xmlU -from utils import utils -import Files +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from .ETStructure import ETStructure #Internal Modules End----------------------------------------------------------- - -class ETImporter(PostProcessorInterface): +class ETImporter(PostProcessorPluginBase): """ This is the base class of the PostProcessor that imports Event-Trees (ETs) into RAVEN as a PointSet """ @@ -71,16 +60,6 @@ class cls. 
inputSpecification.addSub(InputData.parameterInputFactory("expand" , contentType=InputTypes.BoolType)) return inputSpecification - def initialize(self, runInfo, inputs, initDict) : - """ - Method to initialize the pp. - @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) - @ In, inputs, list, list of inputs - @ In, initDict, dict, dictionary with initialization options - @ Out, None - """ - super().initialize(runInfo, inputs, initDict) - def _handleInput(self, paramInput): """ Method that handles PostProcessor parameter input block. @@ -91,8 +70,7 @@ def _handleInput(self, paramInput): fileFormat = paramInput.findFirst('fileFormat') self.fileFormat = fileFormat.value if self.fileFormat not in self.allowedFormats: - self.raiseAnError(IOError, 'ETImporterPostProcessor Post-Processor ' + self.name + ', format ' + str(self.fileFormat) + ' : is not supported') - + self.raiseAnError(IOError, 'ETImporter Post-Processor ' + self.name + ', format ' + str(self.fileFormat) + ' : is not supported') expand = paramInput.findFirst('expand') self.expand = expand.value @@ -106,14 +84,3 @@ def run(self, inputs): outputDict, variables = eventTreeModel.returnDict() outputDict = {'data': outputDict, 'dims':{}} return outputDict - - def collectOutput(self, finishedJob, output, options=None): - """ - Function to place all of the computed data into the output object, (DataObjects) - @ In, finishedJob, object, JobHandler object that is in charge of running this PostProcessor - @ In, output, object, the object where we want to place our computed results - @ In, options, dict, optional, not used in PostProcessor. - dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) - @ Out, None - """ - super().collectOutput(finishedJob, output, options=options) diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py new file mode 100644 index 0000000000..0e9bd58cd2 --- /dev/null +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -0,0 +1,127 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Created on March 10, 2021 + +@author: wangc +""" + +#External Modules--------------------------------------------------------------- +import abc +#External Modules End----------------------------------------------------------- + +#Internal Modules--------------------------------------------------------------- +import Files +from utils import InputData, InputTypes +from DataObjects import DataObject +from Databases import Database +from .PluginBase import PluginBase +from Models.PostProcessors.PostProcessorInterface import PostProcessorInterface +#Internal Modules End----------------------------------------------------------- + +class PostProcessorPluginBase(PostProcessorInterface, PluginBase): + """ + This class represents a specialized class from which each PostProcessor plugins must inherit from + """ + # List containing the methods that need to be checked in order to assess the + # validity of a certain plugin. This list needs to be populated by the derived class + _methodsToCheck = ['getInputSpecification', '_handleInput', 'run'] + entityType = 'PostProcessor' + + ################################################## + # Methods for Internal Use + ################################################## + def createNewInput(self,inputObjs,samplerType,**kwargs): + """ + This function is used to convert internal DataObjects to user-friendly format of data. + The output from this function will be directly passed to the "run" method. + @ In, inputObjs, list, list of DataObjects + @ In, samplerType, string, is the type of sampler that is calling to generate a new input. + Not used for PostProcessor, and "None" is used during "Step" "PostProcess" handling + @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler, + a mandatory key is the sampledVars'that contains a dictionary {'name variable':value}. + Not used for PostProcessor, and {'SampledVars':{'prefix':'None'}, 'additionalEdits':{}} + is used during "Step" "PostProcess" handling + @ Out, inputDs, list, list of data set that will be directly used by the "PostProcessor.run" method. + """ + #### TODO: This method probably need to move to PostProcessor Base Class when we have converted + #### all internal PostProcessors to use Dataset + + ## Type 1: DataObjects => Dataset + ## Type 2: File => File + ## Type 3: HDF5 => ? + assert type(inputObjs) == list + inputDs = [] + for inp in inputObjs: + if isinstance(inp, Files.File): + inputDs.append(inp) + elif isinstance(inp, DataObject.DataObject): + # convert to xarray.Dataset + inputDs.append(inp.asDataset()) + elif isinstance(inp, Database): + self.raiseAnError(IOError, "Database", inp.name, "can not be handled directly by this Post Processor") + else: + self.raiseAnError(IOError, "Unknown input is found", str(inp)) + return inputDs + + + ################################################## + # Plugin APIs + ################################################## + @classmethod + def getInputSpecification(cls): + """ + Method to get a reference to a class that specifies the input data for + class cls. + @ In, cls, the class for which we are retrieving the specification + @ Out, inputSpecification, InputData.ParameterInput, class to use for + specifying input of cls. + """ + inputSpecification = super().getInputSpecification() + return inputSpecification + + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + + def initialize(self, runInfo, inputs, initDict=None): + """ + This function is used to initialize the plugin, i.e. 
set up working dir, + call the initializePlugin method from the plugin + @ In, runInfo, dict, it is the run info from the jobHandler + @ In, inputs, list, it is a list containing whatever is passed with an input role in the step + @ In, initDict, dict, optional, dictionary of all objects available in the step that is using this model + """ + super().initialize(runInfo, inputs, initDict) + + def _handleInput(self, paramInput): + """ + Function to handle the common parts of the model parameter input. + @ In, paramInput, InputData.ParameterInput, the already parsed input. + @ Out, None + """ + super()._handleInput(paramInput) + + ### "run" is required for each specific PostProcessor; it is an abstract method in + ### the PostProcessorInterface base class. + # def run(self, inputDs): + # """ + # This method executes the postprocessor action. + # @ In, inputDs, list, list of Datasets + # @ Out, outputDs, dict, xarray.Dataset, pd.DataFrame + # """ From 94eeb3898780f69bcc6b6aec4dcc4a9cb34f6488 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 15 Apr 2021 14:44:15 -0600 Subject: [PATCH 15/51] convert FT Importer PP --- framework/Models/PostProcessors/FTImporter.py | 36 ++----- 1 file changed, 2 insertions(+), 34 deletions(-) diff --git a/framework/Models/PostProcessors/FTImporter.py b/framework/Models/PostProcessors/FTImporter.py index 2e09d7c3c5..f180bf8127 100644 --- a/framework/Models/PostProcessors/FTImporter.py +++ b/framework/Models/PostProcessors/FTImporter.py @@ -16,24 +16,13 @@ @author: mandd """ -#External Modules--------------------------------------------------------------- -import numpy as np -import xml.etree.ElementTree as ET -import copy -import itertools -from collections import OrderedDict -#External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes -from utils import xmlUtils as xmlU -from utils import utils +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from .FTStructure import FTStructure #Internal Modules End----------------------------------------------------------- -class FTImporter(PostProcessorInterface): +class FTImporter(PostProcessorPluginBase): """ This is the base class of the postprocessor that imports Fault-Trees (FTs) into RAVEN as a PointSet """ @@ -69,16 +58,6 @@ def __init__(self): ## However, the DataObject.load can not be directly used to collect single realization self.outputMultipleRealizations = True - def initialize(self, runInfo, inputs, initDict) : - """ - Method to initialize the pp. - @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) - @ In, inputs, list, list of inputs - @ In, initDict, dict, dictionary with initialization options - @ Out, None - """ - super().initialize(runInfo, inputs, initDict) - def _handleInput(self, paramInput): """ Method that handles PostProcessor parameter input block.
@@ -101,14 +80,3 @@ def run(self, inputs): outputDict = faultTreeModel.returnDict() outputDict = {'data': outputDict, 'dims':{}} return outputDict - - def collectOutput(self, finishedJob, output, options=None): - """ - Function to place all of the computed data into the output object, (DataObjects) - @ In, finishedJob, object, JobHandler object that is in charge of running this PostProcessor - @ In, output, object, the object where we want to place our computed results - @ In, options, dict, optional, not used in PostProcessor. - dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) - @ Out, None - """ - super().collectOutput(finishedJob, output, options=options) From 8419f8f298909ff7d0f253089b1cdea4d11fa416 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 15 Apr 2021 15:23:57 -0600 Subject: [PATCH 16/51] convert MCS Importer --- .../Models/PostProcessors/MCSimporter.py | 29 ++----------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/framework/Models/PostProcessors/MCSimporter.py b/framework/Models/PostProcessors/MCSimporter.py index 048d955287..f67166addc 100644 --- a/framework/Models/PostProcessors/MCSimporter.py +++ b/framework/Models/PostProcessors/MCSimporter.py @@ -22,18 +22,14 @@ #External Modules--------------------------------------------------------------- import pandas as pd import numpy as np -import csv #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessorInterface import PostProcessorInterface +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes -from utils import xmlUtils as xmlU -from utils import utils #Internal Modules End----------------------------------------------------------- - -class MCSImporter(PostProcessorInterface): +class MCSImporter(PostProcessorPluginBase): """ This is the base class of the PostProcessor that imports Minimal Cut Sets (MCSs) into RAVEN as a PointSet """ @@ -68,16 +64,6 @@ def getInputSpecification(cls): inputSpecification.addSub(InputData.parameterInputFactory("BElistColumn", contentType=InputTypes.StringType)) return inputSpecification - def initialize(self, runInfo, inputs, initDict) : - """ - Method to initialize the PostProcessor - @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) - @ In, inputs, list, list of inputs - @ In, initDict, dict, dictionary with initialization options - @ Out, None - """ - super().initialize(runInfo, inputs, initDict) - def _handleInput(self, paramInput): """ Method that handles PostProcessor parameter input block. @@ -145,17 +131,6 @@ def run(self, inputs): mcsPointSet = {'data': mcsPointSet, 'dims': {}} return mcsPointSet - def collectOutput(self, finishedJob, output, options=None): - """ - Function to place all of the computed data into the output object, (DataObjects) - @ In, finishedJob, object, JobHandler object that is in charge of running this PostProcessor - @ In, output, object, the object where we want to place our computed results - @ In, options, dict, optional, not used in PostProcessor. - dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. 
EnsembleModel) - @ Out, None - """ - super().collectOutput(finishedJob, output, options=options) - def mcsReader(mcsListFile): """ Function designed to read a file containing the set of MCSs and to save it as list of list From a2166f70bb9dd179ed3c3728e9b4b4bc3393e803 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 15 Apr 2021 16:43:02 -0600 Subject: [PATCH 17/51] enable both dict and xarray.Dataset for pp plugin --- .../Models/PostProcessors/DataClassifier.py | 112 +++++++++--------- .../PostProcessorPluginBase.py | 3 +- 2 files changed, 57 insertions(+), 58 deletions(-) diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index 6e7427e328..fec48acc17 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -93,7 +93,10 @@ def _handleInput(self, paramInput): func = child.findFirst('Function') funcType = func.parameterValues['type'] funcName = func.value.strip() - self.mapping[child.parameterValues['name']] = (funcType, funcName) + varName = child.parameterValues['name'] + if varName in self.mapping.keys(): + self.raiseAnError(IOError, "The variable {} name is duplicated in the XML input".format(varName)) + self.mapping[varName] = (funcType, funcName) elif child.getName() == 'label': self.label = child.value.strip() @@ -110,66 +113,61 @@ def inputToInternal(self, currentInput): haveClassifier = False haveTarget = False for inputObject in currentInput: - if isinstance(inputObject, dict): - newInput.append(inputObject) + if inputObject.type not in ['PointSet', 'HistorySet']: + self.raiseAnError(IOError, "The input for this postprocesor", self.name, "is not acceptable! Allowed inputs are 'PointSet' and 'HistorySet'.") + if len(inputObject) == 0: + self.raiseAnError(IOError, "The input", inputObject.name, "is empty!") + inputDataset = inputObject.asDataset() + inputParams = inputObject.getVars('input') + outputParams = inputObject.getVars('output') + dataType = None + mappingKeys = self.mapping.keys() + + if set(self.mapping.keys()) == set(inputParams) and self.label in outputParams: + dataType = 'classifier' + if not haveClassifier: + haveClassifier = True + else: + self.raiseAnError(IOError, "Both input data objects have been already processed! No need to execute this postprocessor", self.name) + if inputObject.type != 'PointSet': + self.raiseAnError(IOError, "Only PointSet is allowed as classifier, but HistorySet", inputObject.name, "is provided!") else: - if inputObject.type not in ['PointSet', 'HistorySet']: - self.raiseAnError(IOError, "The input for this postprocesor", self.name, "is not acceptable! Allowed inputs are 'PointSet' and 'HistorySet'.") - if len(inputObject) == 0: - self.raiseAnError(IOError, "The input", inputObject.name, "is empty!") - inputDataset = inputObject.asDataset() - inputParams = inputObject.getVars('input') - outputParams = inputObject.getVars('output') - dataType = None - mappingKeys = self.mapping.keys() - if len(set(mappingKeys)) != len(mappingKeys): - dups = set([elem for elem in mappingKeys if mappingKeys.count(elem) > 1]) - self.raiseAnError(IOError, "The same variable {} name is used multiple times in the XML input".format(dups[0])) - if set(self.mapping.keys()) == set(inputParams) and self.label in outputParams: - dataType = 'classifier' - if not haveClassifier: - haveClassifier = True - else: - self.raiseAnError(IOError, "Both input data objects have been already processed! 
No need to execute this postprocessor", self.name) - if inputObject.type != 'PointSet': - self.raiseAnError(IOError, "Only PointSet is allowed as classifier, but HistorySet", inputObject.name, "is provided!") + dataType = 'target' + newInput[dataType]['data'] = inputObject.asDataset(outType='dict')['data'] + newInput[dataType]['dims'] = inputObject.getDimensions() + if not haveTarget: + haveTarget = True else: - dataType = 'target' - newInput[dataType]['data'] = inputObject.asDataset(outType='dict')['data'] - newInput[dataType]['dims'] = inputObject.getDimensions() - if not haveTarget: - haveTarget = True - else: - self.raiseAnError(IOError, "None of the input DataObjects can be used as the reference classifier! Either the label", \ - self.label, "is not exist in the output of the DataObjects or the inputs of the DataObjects are not the same as", \ - ','.join(self.mapping.keys())) - newInput[dataType]['input'] = dict.fromkeys(inputParams) - newInput[dataType]['output'] = dict.fromkeys(outputParams) - if inputObject.type == 'PointSet': + self.raiseAnError(IOError, "None of the input DataObjects can be used as the reference classifier! Either the label", \ + self.label, "is not exist in the output of the DataObjects or the inputs of the DataObjects are not the same as", \ + ','.join(self.mapping.keys())) + newInput[dataType]['input'] = dict.fromkeys(inputParams) + newInput[dataType]['output'] = dict.fromkeys(outputParams) + if inputObject.type == 'PointSet': + for elem in inputParams: + newInput[dataType]['input'][elem] = copy.deepcopy(inputDataset[elem].values) + for elem in outputParams: + newInput[dataType]['output'][elem] = copy.deepcopy(inputDataset[elem].values) + newInput[dataType]['type'] = inputObject.type + newInput[dataType]['name'] = inputObject.name + else: + # only extract the last element in each realization for the HistorySet + newInput[dataType]['type'] = inputObject.type + newInput[dataType]['name'] = inputObject.name + numRlzs = len(inputObject) + newInput[dataType]['historySizes'] = dict.fromkeys(range(numRlzs)) + for i in range(numRlzs): + rlz = inputObject.realization(index=i) for elem in inputParams: - newInput[dataType]['input'][elem] = copy.deepcopy(inputDataset[elem].values) + if newInput[dataType]['input'][elem] is None: + newInput[dataType]['input'][elem] = np.empty(0) + newInput[dataType]['input'][elem] = np.append(newInput[dataType]['input'][elem], rlz[elem]) for elem in outputParams: - newInput[dataType]['output'][elem] = copy.deepcopy(inputDataset[elem].values) - newInput[dataType]['type'] = inputObject.type - newInput[dataType]['name'] = inputObject.name - else: - # only extract the last element in each realization for the HistorySet - newInput[dataType]['type'] = inputObject.type - newInput[dataType]['name'] = inputObject.name - numRlzs = len(inputObject) - newInput[dataType]['historySizes'] = dict.fromkeys(range(numRlzs)) - for i in range(numRlzs): - rlz = inputObject.realization(index=i) - for elem in inputParams: - if newInput[dataType]['input'][elem] is None: - newInput[dataType]['input'][elem] = np.empty(0) - newInput[dataType]['input'][elem] = np.append(newInput[dataType]['input'][elem], rlz[elem]) - for elem in outputParams: - if newInput[dataType]['output'][elem] is None: - newInput[dataType]['output'][elem] = np.empty(0) - newInput[dataType]['output'][elem] = np.append(newInput[dataType]['output'][elem], rlz[elem].values[-1]) - if newInput[dataType]['historySizes'][i] is None: - newInput[dataType]['historySizes'][i] = len(rlz[elem].values) + if 
newInput[dataType]['output'][elem] is None: + newInput[dataType]['output'][elem] = np.empty(0) + newInput[dataType]['output'][elem] = np.append(newInput[dataType]['output'][elem], rlz[elem].values[-1]) + if newInput[dataType]['historySizes'][i] is None: + newInput[dataType]['historySizes'][i] = len(rlz[elem].values) return newInput diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index 0e9bd58cd2..fc0e645894 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -68,7 +68,8 @@ def createNewInput(self,inputObjs,samplerType,**kwargs): inputDs.append(inp) elif isinstance(inp, DataObject.DataObject): # convert to xarray.Dataset - inputDs.append(inp.asDataset()) + outType = kwargs.get('outType', 'xrDataset') + inputDs.append(inp.asDataset(outType=outType)) elif isinstance(inp, Database): self.raiseAnError(IOError, "Database", inp.name, "can not be handled directly by this Post Processor") else: From 63cdfad92413815870f98998ac6db52bc4ae1d89 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 16 Apr 2021 15:17:41 -0600 Subject: [PATCH 18/51] convert DataClassifier to use dict as input --- framework/DataObjects/DataSet.py | 1 + framework/Models/PostProcessor.py | 2 + .../Models/PostProcessors/DataClassifier.py | 108 ++++++------------ .../PostProcessorPluginBase.py | 5 +- 4 files changed, 41 insertions(+), 75 deletions(-) diff --git a/framework/DataObjects/DataSet.py b/framework/DataObjects/DataSet.py index 608eaa83b6..54fd1da4c1 100644 --- a/framework/DataObjects/DataSet.py +++ b/framework/DataObjects/DataSet.py @@ -1218,6 +1218,7 @@ def _convertToDict(self): # supporting data dataDict['dims'] = self.getDimensions() dataDict['metadata'] = self.getMeta(general=True) + dataDict['type'] = self.type # main data if self.type == "PointSet": ## initialize with np arrays of objects diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index f0c40b0746..0c1f5a82ee 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -175,6 +175,8 @@ def createNewInput(self,myInput,samplerType,**kwargs): a mandatory key is the sampledVars'that contains a dictionary {'name variable':value} @ Out, myInput, list, the inputs (list) to start from to generate the new one """ + if 'createNewInput' in dir(self._pp): + myInput = self._pp.createNewInput(myInput,samplerType,**kwargs) return myInput @Parallel() diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index fec48acc17..ff072dbf4b 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -16,13 +16,13 @@ @author: Congjian Wang """ -import copy + import numpy as np from utils import InputData, InputTypes, utils -from .PostProcessorInterface import PostProcessorInterface +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase -class DataClassifier(PostProcessorInterface): +class DataClassifier(PostProcessorPluginBase): """ This Post-Processor performs data classification based on given classifier. 
In order to use this interface post-processor, the users need to provide @@ -100,124 +100,86 @@ def _handleInput(self, paramInput): elif child.getName() == 'label': self.label = child.value.strip() - def inputToInternal(self, currentInput): + def identifyInputs(self, currentInput): """ - Method to convert a list of input objects into the internal format that is - understandable by this pp. - @ In, currentInput, list, a list of DataObjects - @ Out, newInput, list, list of converted data + Method to identify the inputs for classifier and target, respectively + @ In, currentInput, list, a list of dictionaries + @ Out, newInput, dict, dictionary of identified inputs """ if isinstance(currentInput,list) and len(currentInput) != 2: - self.raiseAnError(IOError, "Two inputs DataObjects are required for postprocessor", self.name) + self.raiseAnError(IOError, "Required two inputs for PostProcessor {}, but got {}".format(self.name, len(currentInput))) newInput ={'classifier':{}, 'target':{}} haveClassifier = False haveTarget = False - for inputObject in currentInput: - if inputObject.type not in ['PointSet', 'HistorySet']: + requiredKeys = list(self.mapping.keys()) + [self.label] + for inputDict in currentInput: + print(inputDict['type']) + if inputDict['type'] not in ['PointSet', 'HistorySet']: self.raiseAnError(IOError, "The input for this postprocesor", self.name, "is not acceptable! Allowed inputs are 'PointSet' and 'HistorySet'.") - if len(inputObject) == 0: - self.raiseAnError(IOError, "The input", inputObject.name, "is empty!") - inputDataset = inputObject.asDataset() - inputParams = inputObject.getVars('input') - outputParams = inputObject.getVars('output') dataType = None - mappingKeys = self.mapping.keys() - - if set(self.mapping.keys()) == set(inputParams) and self.label in outputParams: + if set(requiredKeys).issubset(set(inputDict['data'].keys())): dataType = 'classifier' if not haveClassifier: haveClassifier = True else: self.raiseAnError(IOError, "Both input data objects have been already processed! No need to execute this postprocessor", self.name) - if inputObject.type != 'PointSet': - self.raiseAnError(IOError, "Only PointSet is allowed as classifier, but HistorySet", inputObject.name, "is provided!") + if inputDict['type'] != 'PointSet': + self.raiseAnError(IOError, "Only PointSet is allowed as classifier, but got", inputDict['type']) else: dataType = 'target' - newInput[dataType]['data'] = inputObject.asDataset(outType='dict')['data'] - newInput[dataType]['dims'] = inputObject.getDimensions() if not haveTarget: haveTarget = True else: self.raiseAnError(IOError, "None of the input DataObjects can be used as the reference classifier! 
Either the label", \ self.label, "is not exist in the output of the DataObjects or the inputs of the DataObjects are not the same as", \ ','.join(self.mapping.keys())) - newInput[dataType]['input'] = dict.fromkeys(inputParams) - newInput[dataType]['output'] = dict.fromkeys(outputParams) - if inputObject.type == 'PointSet': - for elem in inputParams: - newInput[dataType]['input'][elem] = copy.deepcopy(inputDataset[elem].values) - for elem in outputParams: - newInput[dataType]['output'][elem] = copy.deepcopy(inputDataset[elem].values) - newInput[dataType]['type'] = inputObject.type - newInput[dataType]['name'] = inputObject.name - else: - # only extract the last element in each realization for the HistorySet - newInput[dataType]['type'] = inputObject.type - newInput[dataType]['name'] = inputObject.name - numRlzs = len(inputObject) - newInput[dataType]['historySizes'] = dict.fromkeys(range(numRlzs)) - for i in range(numRlzs): - rlz = inputObject.realization(index=i) - for elem in inputParams: - if newInput[dataType]['input'][elem] is None: - newInput[dataType]['input'][elem] = np.empty(0) - newInput[dataType]['input'][elem] = np.append(newInput[dataType]['input'][elem], rlz[elem]) - for elem in outputParams: - if newInput[dataType]['output'][elem] is None: - newInput[dataType]['output'][elem] = np.empty(0) - newInput[dataType]['output'][elem] = np.append(newInput[dataType]['output'][elem], rlz[elem].values[-1]) - if newInput[dataType]['historySizes'][i] is None: - newInput[dataType]['historySizes'][i] = len(rlz[elem].values) - + newInput[dataType] = inputDict return newInput def run(self, inputIn): """ This method executes the postprocessor action. - @ In, inputIn, list, list of DataObjects + @ In, inputIn, list, list of input dictionaries @ Out, outputDict, dict, dictionary of outputs """ - inputDict = self.inputToInternal(inputIn) + inputDict = self.identifyInputs(inputIn) targetDict = inputDict['target'] classifierDict = inputDict['classifier'] outputDict = {} outputDict.update(inputDict['target']['data']) outputType = targetDict['type'] - numRlz = utils.first(targetDict['input'].values()).size + dimsDict = targetDict['dims'] + numRlz = utils.first(targetDict['data'].values()).size outputDict[self.label] = [] for i in range(numRlz): tempTargDict = {} - for param, vals in targetDict['input'].items(): - tempTargDict[param] = vals[i] - for param, vals in targetDict['output'].items(): + for param, vals in targetDict['data'].items(): tempTargDict[param] = vals[i] tempClfList = [] labelIndex = None - for key, values in classifierDict['input'].items(): + for key in self.mapping.keys(): calcVal = self.funcDict[key].evaluate("evaluate", tempTargDict) + values = classifierDict['data'][key] inds, = np.where(np.asarray(values) == calcVal) if labelIndex is None: labelIndex = set(inds) else: labelIndex = labelIndex & set(inds) if len(labelIndex) != 1: - self.raiseAnError(IOError, "The parameters", ",".join(tempTargDict.keys()), "with values", ",".join([str(el) for el in tempTargDict.values()]), "could not be put in any class!") - label = classifierDict['output'][self.label][list(labelIndex)[0]] + self.raiseAnError(IOError, "The parameters", ",".join(tempTargDict.keys()), "with values", ",".join([str(el) for el in tempTargDict.values()]), "could not be classified!") + label = classifierDict['data'][self.label][list(labelIndex)[0]] if outputType == 'PointSet': outputDict[self.label].append(label) else: - outputDict[self.label].append(np.asarray([label]*targetDict['historySizes'][i])) + historySize = 1 + 
for var in targetDict['data'].keys(): + dims = dimsDict[var] + if len(dims) !=0: + historySize = len(targetDict['data'][var][i]) + if self.label not in dimsDict: + dimsDict[self.label] = dims + break + outputDict[self.label].append(np.asarray([label]*historySize)) outputDict[self.label] = np.asarray(outputDict[self.label]) - outputDict = {'data': outputDict, 'dims':inputDict['target']['dims']} + outputDict = {'data': outputDict, 'dims':targetDict['dims']} return outputDict - - def collectOutput(self, finishedJob, output, options=None): - """ - Function to place all of the computed data into the output object - @ In, finishedJob, JobHandler External or Internal instance, A JobHandler object that is in charge of running this post-processor - @ In, output, dataObjects, The object where we want to place our computed results - @ In, options, dict, optional, not used in PostProcessor. - dictionary of options that can be passed in when the collect of the output is performed by another model (e.g. EnsembleModel) - @ Out, None - """ - super().collectOutput(finishedJob, output, options=options) diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index fc0e645894..ff49e0b8d1 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -67,8 +67,9 @@ def createNewInput(self,inputObjs,samplerType,**kwargs): if isinstance(inp, Files.File): inputDs.append(inp) elif isinstance(inp, DataObject.DataObject): - # convert to xarray.Dataset - outType = kwargs.get('outType', 'xrDataset') + # Current accept two types: 1) 'dict', 2) 'xrDataset' + # Set default to 'dict', this is consistent with current post-processors + outType = kwargs.get('outType', 'dict') inputDs.append(inp.asDataset(outType=outType)) elif isinstance(inp, Database): self.raiseAnError(IOError, "Database", inp.name, "can not be handled directly by this Post Processor") From 4fe0f842b60eaa6364790c964b96307918bbe21e Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 16 Apr 2021 16:43:13 -0600 Subject: [PATCH 19/51] convert RiskMeasures PP --- framework/DataObjects/DataSet.py | 5 +++ framework/Models/PostProcessors/Factory.py | 1 + .../PostProcessors/RiskMeasuresDiscrete.py} | 44 +++++++++---------- framework/Models/PostProcessors/__init__.py | 1 + .../RiskMeasuresDiscrete/PrintPPS_dump.csv | 2 - .../InterfacedPostProcessor/tests | 17 +------ .../RiskMeasuresDiscrete/modelTHreduced.py | 0 .../RiskMeasuresDiscreteMultipleIE/model1.py | 0 .../RiskMeasuresDiscreteMultipleIE/model2.py | 0 .../testPrintHistorySet_dump.csv | 0 .../testPrintHistorySet_dump.xml | 0 .../testPrintHistorySet_dump_0.csv | 0 .../RiskMeasuresDiscrete/PrintPPS_dump.csv | 2 + .../PrintPPS_dump.csv | 4 +- .../gold/RiskMeasuresTimeDep/PrintHS_0.csv | 0 .../test_riskMeasuresDiscrete.xml | 14 +++--- .../test_riskMeasuresDiscreteMultipleIE.xml | 12 ++--- .../test_riskMonitor.xml | 18 ++++---- .../PostProcessors/RiskMeasuresDiscrete/tests | 17 +++++++ 19 files changed, 73 insertions(+), 64 deletions(-) rename framework/{PostProcessorFunctions/riskMeasuresDiscrete.py => Models/PostProcessors/RiskMeasuresDiscrete.py} (94%) delete mode 100644 tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/RiskMeasuresDiscrete/modelTHreduced.py (100%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => 
RiskMeasuresDiscrete}/RiskMeasuresDiscreteMultipleIE/model1.py (100%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/RiskMeasuresDiscreteMultipleIE/model2.py (100%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/RiskMeasuresTimeDep/testPrintHistorySet_dump.csv (100%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/RiskMeasuresTimeDep/testPrintHistorySet_dump.xml (100%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/RiskMeasuresTimeDep/testPrintHistorySet_dump_0.csv (100%) create mode 100644 tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv (58%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/gold/RiskMeasuresTimeDep/PrintHS_0.csv (100%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/test_riskMeasuresDiscrete.xml (96%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/test_riskMeasuresDiscreteMultipleIE.xml (97%) rename tests/framework/PostProcessors/{InterfacedPostProcessor => RiskMeasuresDiscrete}/test_riskMonitor.xml (97%) create mode 100644 tests/framework/PostProcessors/RiskMeasuresDiscrete/tests diff --git a/framework/DataObjects/DataSet.py b/framework/DataObjects/DataSet.py index 54fd1da4c1..86db7cf5bd 100644 --- a/framework/DataObjects/DataSet.py +++ b/framework/DataObjects/DataSet.py @@ -1219,6 +1219,11 @@ def _convertToDict(self): dataDict['dims'] = self.getDimensions() dataDict['metadata'] = self.getMeta(general=True) dataDict['type'] = self.type + dataDict['inpVars'] = self.getVars('input') + dataDict['outVars'] = self.getVars('output') + dataDict['numberRealization'] = self.size + dataDict['name'] = self.name + dataDict['metaKeys'] = self.getVars('meta') # main data if self.type == "PointSet": ## initialize with np arrays of objects diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py index 79659e5f6e..a44a656b27 100644 --- a/framework/Models/PostProcessors/Factory.py +++ b/framework/Models/PostProcessors/Factory.py @@ -39,6 +39,7 @@ from .ParetoFrontierPostProcessor import ParetoFrontier from .MCSimporter import MCSImporter from .EconomicRatio import EconomicRatio +from .RiskMeasuresDiscrete import RiskMeasuresDiscrete ## These utilize the optional prequisite library PySide, so don't error if they ## do not import appropriately. 
try: diff --git a/framework/PostProcessorFunctions/riskMeasuresDiscrete.py b/framework/Models/PostProcessors/RiskMeasuresDiscrete.py similarity index 94% rename from framework/PostProcessorFunctions/riskMeasuresDiscrete.py rename to framework/Models/PostProcessors/RiskMeasuresDiscrete.py index e622aa716f..1d9c3db3bb 100644 --- a/framework/PostProcessorFunctions/riskMeasuresDiscrete.py +++ b/framework/Models/PostProcessors/RiskMeasuresDiscrete.py @@ -16,19 +16,15 @@ @author: mandd """ -#for future compatibility with Python 3-------------------------------------------------------------- -from __future__ import division, print_function, unicode_literals, absolute_import -#End compatibility block for Python 3---------------------------------------------------------------- - #External Modules------------------------------------------------------------------------------------ import numpy as np import copy #External Modules End-------------------------------------------------------------------------------- -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP from utils import InputData, InputTypes +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase -class riskMeasuresDiscrete(PostProcessorInterfaceBase): +class RiskMeasuresDiscrete(PostProcessorPluginBase): """ This class implements the four basic risk-importance measures This class inherits form the base class PostProcessorInterfaceBase and it contains three methods: @@ -36,6 +32,7 @@ class riskMeasuresDiscrete(PostProcessorInterfaceBase): - run """ _availableMeasures = set(['B','FV','RAW','RRW','R0']) + @classmethod def getInputSpecification(cls): """ @@ -46,7 +43,6 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("riskMeasuresDiscrete")) inputSpecification.addSubSimple("measures", InputTypes.StringListType) variableSub = InputData.parameterInputFactory("variable", contentType=InputTypes.StringType) variableSub.addParam("R0values", InputTypes.FloatListType) @@ -63,6 +59,21 @@ class cls. inputSpecification.addSubSimple("method", contentType=InputTypes.StringType) return inputSpecification + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + self.validDataType = ['PointSet','HistorySet'] # The list of accepted types of DataObject + ## Currently, we have used both DataObject.addRealization and DataObject.load to + ## collect the PostProcessor returned outputs. DataObject.addRealization is used to + ## collect single realization, while DataObject.load is used to collect multiple realizations + ## However, the DataObject.load can not be directly used to collect single realization + ## One possible solution is all postpocessors return a list of realizations, and we only + ## use addRealization method to add the collections into the DataObjects + self.outputMultipleRealizations = True def availableMeasures(cls): """ @@ -71,28 +82,17 @@ def availableMeasures(cls): """ return cls._availableMeasures - def initialize(self): - """ - Method to initialize the Interfaced Post-processor - @ In, None - @ Out, None - """ - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'PointSet|HistorySet' - self.outputFormat = 'PointSet' - def _handleInput(self, paramInput): """ Function to handle the parameter input. @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - + super()._handleInput(paramInput) self.variables = {} self.target = {} self.IEData = {} self.temporalID = None - for child in paramInput.subparts: if child.getName() == 'measures': self.measures = set(child.value) @@ -114,7 +114,7 @@ def _handleInput(self, paramInput): val1 = values[0] val2 = values[1] except: - self.raiseAnError(IOError,' Wrong R0values associated to riskMeasuresDiscrete Post-Processor') + self.raiseAnError(IOError,' Wrong R0values associated to RiskMeasuresDiscrete Post-Processor') self.variables[variableID]['R0low'] = min(val1,val2) self.variables[variableID]['R0high'] = max(val1,val2) else: @@ -129,7 +129,7 @@ def _handleInput(self, paramInput): val1 = values[0] val2 = values[1] except: - self.raiseAnError(IOError,' Wrong R1values associated to riskMeasuresDiscrete Post-Processor') + self.raiseAnError(IOError,' Wrong R1values associated to RiskMeasuresDiscrete Post-Processor') self.variables[variableID]['R1low'] = min(val1,val2) self.variables[variableID]['R1high'] = max(val1,val2) else: @@ -147,7 +147,7 @@ def _handleInput(self, paramInput): val1 = values[0] val2 = values[1] except: - self.raiseAnError(IOError,' Wrong target values associated to riskMeasuresDiscrete Post-Processor') + self.raiseAnError(IOError,' Wrong target values associated to RiskMeasuresDiscrete Post-Processor') self.target['low'] = min(val1,val2) self.target['high'] = max(val1,val2) else: diff --git a/framework/Models/PostProcessors/__init__.py b/framework/Models/PostProcessors/__init__.py index f2839cc2ba..df1a6dee02 100644 --- a/framework/Models/PostProcessors/__init__.py +++ b/framework/Models/PostProcessors/__init__.py @@ -40,6 +40,7 @@ from .ParetoFrontierPostProcessor import ParetoFrontier from .MCSimporter import MCSImporter from .EconomicRatio import EconomicRatio +from .RiskMeasuresDiscrete import RiskMeasuresDiscrete ## These utilize the optional prequisite library PySide, so don't error if they ## do not import appropriately. 
additionalModules = [] diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv b/tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv deleted file mode 100644 index 0ce2cffd67..0000000000 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv +++ /dev/null @@ -1,2 +0,0 @@ -pump1Time_B,pump2Time_B,valveTime_B,pump1Time_FV,pump2Time_FV,valveTime_FV,pump1Time_RAW,pump2Time_RAW,valveTime_RAW,pump1Time_RRW,pump2Time_RRW,valveTime_RRW,ProbabilityWeight-pump2Time,ProbabilityWeight-pump1Time,ProbabilityWeight,PointProbability,ProbabilityWeight-valveTime,prefix -0.0666666666667,-0.0833333333333,0.666666666667,-0.666666666667,-0.666666666667,-0.666666666667,2.0,1.25,5.0,0.6,0.6,0.6,1.0,1.0,1.0,1.0,1.0,1.0 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/tests b/tests/framework/PostProcessors/InterfacedPostProcessor/tests index 5baaa26196..55f558db19 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/tests +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/tests @@ -22,7 +22,7 @@ input = 'test_historySetSamplingIntervalAve.xml' csv = 'historySamplingIntervalAve/dump_interval_average_0.csv' output = 'historySamplingIntervalAve/dump_interval_average.xml historySamplingIntervalAve/1-plot_interval_average_line-scatter.pdf' - [../] + [../] [./HistorySetSnapShot] type = 'RavenFramework' input = 'test_historySetSnapshot.xml' @@ -77,27 +77,12 @@ csv = 'TypicalHistoryFromHS/writeTypDataOut_0.csv' output = 'TypicalHistoryFromHS/writeTypDataOut.csv TypicalHistoryFromHS/writeTypDataOut.xml' [../] - [./RiskMeasuresDiscrete] - type = 'RavenFramework' - input = 'test_riskMeasuresDiscrete.xml' - csv = 'RiskMeasuresDiscrete/PrintPPS_dump.csv' - [../] [./metadataUsageInInterfacePP] type = 'RavenFramework' input = 'test_metadata_usage_in_interfacePP.xml' output = 'metadataUsageInInterfacePP/outputDataMChistory.xml metadataUsageInInterfacePP/outputDataMChistory.csv metadataUsageInInterfacePP/historiesSetSyncronized_dump.xml' test_interface_only = True [../] - [./RiskMeasuresDiscreteMultipleIE] - type = 'RavenFramework' - input = 'test_riskMeasuresDiscreteMultipleIE.xml' - csv = 'RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv' - [../] - [./RiskMeasuresDiscreteTimeDep] - type = 'RavenFramework' - input = 'test_riskMonitor.xml' - csv = 'RiskMeasuresTimeDep/PrintHS_0.csv' - [../] [./HStoPSoperators] type = 'RavenFramework' input = 'test_HistorySetToPointSetOperators.xml' diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresDiscrete/modelTHreduced.py b/tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresDiscrete/modelTHreduced.py similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresDiscrete/modelTHreduced.py rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresDiscrete/modelTHreduced.py diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresDiscreteMultipleIE/model1.py b/tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresDiscreteMultipleIE/model1.py similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresDiscreteMultipleIE/model1.py rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresDiscreteMultipleIE/model1.py diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresDiscreteMultipleIE/model2.py 
b/tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresDiscreteMultipleIE/model2.py similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresDiscreteMultipleIE/model2.py rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresDiscreteMultipleIE/model2.py diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresTimeDep/testPrintHistorySet_dump.csv b/tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresTimeDep/testPrintHistorySet_dump.csv similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresTimeDep/testPrintHistorySet_dump.csv rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresTimeDep/testPrintHistorySet_dump.csv diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresTimeDep/testPrintHistorySet_dump.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresTimeDep/testPrintHistorySet_dump.xml similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresTimeDep/testPrintHistorySet_dump.xml rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresTimeDep/testPrintHistorySet_dump.xml diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresTimeDep/testPrintHistorySet_dump_0.csv b/tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresTimeDep/testPrintHistorySet_dump_0.csv similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/RiskMeasuresTimeDep/testPrintHistorySet_dump_0.csv rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/RiskMeasuresTimeDep/testPrintHistorySet_dump_0.csv diff --git a/tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv b/tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv new file mode 100644 index 0000000000..b6ff65f2f7 --- /dev/null +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscrete/PrintPPS_dump.csv @@ -0,0 +1,2 @@ +pump1Time_B,pump2Time_B,valveTime_B,pump1Time_FV,pump2Time_FV,valveTime_FV,pump1Time_RAW,pump2Time_RAW,valveTime_RAW,pump1Time_RRW,pump2Time_RRW,valveTime_RRW +0.0666666666667,-0.0833333333333,0.666666666667,-0.666666666667,-0.666666666667,-0.666666666667,2.0,1.25,5.0,0.6,0.6,0.6 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv b/tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv similarity index 58% rename from tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv index 9747b268f2..b0ac0e9699 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv @@ -1,2 +1,2 @@ -Astatus_FV,Bstatus_FV,Cstatus_FV,Dstatus_FV,Astatus_RAW,Bstatus_RAW,Cstatus_RAW,Dstatus_RAW,Astatus_RRW,Bstatus_RRW,Cstatus_RRW,Dstatus_RRW,ProbabilityWeight-Bstatus,ProbabilityWeight-Dstatus,ProbabilityWeight,PointProbability,ProbabilityWeight-Cstatus,prefix 
-0.656765676568,0.339933993399,0.339933993399,0.013201320132,66.0198019802,7.45874587459,4.05940594059,1.64686468647,2.91346153846,1.515,1.515,1.01337792642,1.0,1.0,1.0,1.0,1.0,1.0 +Astatus_FV,Bstatus_FV,Cstatus_FV,Dstatus_FV,Astatus_RAW,Bstatus_RAW,Cstatus_RAW,Dstatus_RAW,Astatus_RRW,Bstatus_RRW,Cstatus_RRW,Dstatus_RRW +0.656765676568,0.339933993399,0.339933993399,0.013201320132,66.0198019802,7.45874587459,4.05940594059,1.64686468647,2.91346153846,1.515,1.515,1.01337792642 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresTimeDep/PrintHS_0.csv b/tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresTimeDep/PrintHS_0.csv similarity index 100% rename from tests/framework/PostProcessors/InterfacedPostProcessor/gold/RiskMeasuresTimeDep/PrintHS_0.csv rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/gold/RiskMeasuresTimeDep/PrintHS_0.csv diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMeasuresDiscrete.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml similarity index 96% rename from tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMeasuresDiscrete.xml rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml index ad8e071c24..7193251031 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMeasuresDiscrete.xml +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml @@ -9,7 +9,7 @@ Tests of the four risk importance measures: Risk Achievement Worth (RAW), Risk Reduction Worth (RRW), Birnbaum (B) and Fussell-Vesely (FV) - + RiskMeasuresDiscrete FirstMRun,PP @@ -19,21 +19,21 @@ lorentzAttractor.py - + pump1Time,pump2Time,valveTime,Tmax,outcome,pump1State,pump2State,valveState,failureTime - + riskMeasuresDiscrete B,FV,RAW,RRW pump1Time pump2Time valveTime Tmax - + - + 0.00069444 @@ -84,7 +84,7 @@ outPPS - + pump1Time,pump2Time,valveTime @@ -101,5 +101,5 @@ pump1Time_RRW , pump2Time_RRW , valveTime_RRW - + diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMeasuresDiscreteMultipleIE.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml similarity index 97% rename from tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMeasuresDiscreteMultipleIE.xml rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml index 182fbfdc28..27bb65809a 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMeasuresDiscreteMultipleIE.xml +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml @@ -9,13 +9,13 @@ Tests of the four risk importance measures for multiple IEs: Risk Achievement Worth (RAW), Risk Reduction Worth (RRW), Birnbaum (B) and Fussell-Vesely (FV) - + RiskMeasuresDiscreteMultipleIE Run1,Run2,PP 1 - + Astatus,Bstatus,Cstatus,outcome @@ -23,7 +23,7 @@ Bstatus,Cstatus,Dstatus,outcome - + riskMeasuresDiscrete B,FV,RAW,RRW Astatus @@ -33,7 +33,7 @@ outcome outRun1 outRun2 - + @@ -110,7 +110,7 @@ outPPS - + Astatus,Bstatus,Cstatus @@ -132,5 +132,5 @@ Astatus_FV, Bstatus_FV, Cstatus_FV, Dstatus_FV, Astatus_RAW, Bstatus_RAW, Cstatus_RAW, Dstatus_RAW, Astatus_RRW, Bstatus_RRW, Cstatus_RRW, Dstatus_RRW - + diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMonitor.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml similarity index 97% rename 
from tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMonitor.xml rename to tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml index 89ca916061..a47bc9527b 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_riskMonitor.xml +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml @@ -6,26 +6,26 @@ 2017-02-23 InterfacedPostProcessor - Tests of the four risk importance measures for time dependent data: Risk Achievement Worth (RAW), + Tests of the four risk importance measures for time dependent data: Risk Achievement Worth (RAW), Risk Reduction Worth (RRW), Birnbaum (B) and Fussell-Vesely (FV) - + testPrintHistorySet_dump.csv - + RiskMeasuresTimeDep LoadHS,Run1,PP,plot 1 - + Astatus,Bstatus,Cstatus,outcome - + riskMeasuresDiscrete B,FV,RAW,RRW,R0 Astatus @@ -34,7 +34,7 @@ outcome outRun1 time - + @@ -233,7 +233,7 @@ - + Astatus,Bstatus,Cstatus @@ -261,10 +261,10 @@ x0 time , R0 , Astatus_FV , Bstatus_FV , Cstatus_FV , - Astatus_RAW , Bstatus_RAW , Cstatus_RAW , + Astatus_RAW , Bstatus_RAW , Cstatus_RAW , Astatus_RRW , Bstatus_RRW , Cstatus_RRW - + diff --git a/tests/framework/PostProcessors/RiskMeasuresDiscrete/tests b/tests/framework/PostProcessors/RiskMeasuresDiscrete/tests new file mode 100644 index 0000000000..b2cccd6cb7 --- /dev/null +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/tests @@ -0,0 +1,17 @@ +[Tests] + [./RiskMeasuresDiscrete] + type = 'RavenFramework' + input = 'test_riskMeasuresDiscrete.xml' + csv = 'RiskMeasuresDiscrete/PrintPPS_dump.csv' + [../] + [./RiskMeasuresDiscreteMultipleIE] + type = 'RavenFramework' + input = 'test_riskMeasuresDiscreteMultipleIE.xml' + csv = 'RiskMeasuresDiscreteMultipleIE/PrintPPS_dump.csv' + [../] + [./RiskMeasuresDiscreteTimeDep] + type = 'RavenFramework' + input = 'test_riskMonitor.xml' + csv = 'RiskMeasuresTimeDep/PrintHS_0.csv' + [../] +[] From 3310ab806e64962b91b44b1263e8c8f2d0faa571 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Mon, 19 Apr 2021 13:53:14 -0600 Subject: [PATCH 20/51] fix precheck --- framework/Models/PostProcessors/DataClassifier.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index ff072dbf4b..e5b0b3d633 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -166,7 +166,7 @@ def run(self, inputIn): else: labelIndex = labelIndex & set(inds) if len(labelIndex) != 1: - self.raiseAnError(IOError, "The parameters", ",".join(tempTargDict.keys()), "with values", ",".join([str(el) for el in tempTargDict.values()]), "could not be classified!") + self.raiseAnError(IOError, "The parameters", ",".join(tempTargDict.keys()), "with values", ",".join([str(el) for el in tempTargDict.values()]), "could not be categorized!") label = classifierDict['data'][self.label][list(labelIndex)[0]] if outputType == 'PointSet': outputDict[self.label].append(label) From 8d435d21de11e6a6d2b035fea85487521442bb00 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 22 Apr 2021 16:48:35 -0600 Subject: [PATCH 21/51] update validateDict --- framework/Models/Model.py | 4 --- framework/Models/PostProcessor.py | 28 ++++++++----------- .../Models/PostProcessors/BasicStatistics.py | 2 -- .../BasicStatistics/test_BasicStatistics.xml | 7 ----- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/framework/Models/Model.py b/framework/Models/Model.py index 
7cc4f3e9bc..acd9038486 100644 --- a/framework/Models/Model.py +++ b/framework/Models/Model.py @@ -14,10 +14,6 @@ """ Module where the base class and the specialization of different type of Model are """ -#for future compatibility with Python 3-------------------------------------------------------------- -from __future__ import division, print_function, unicode_literals, absolute_import -#End compatibility block for Python 3---------------------------------------------------------------- - #External Modules------------------------------------------------------------------------------------ import copy import numpy as np diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index 1918cc26d4..ad10f6125e 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -71,13 +71,7 @@ def specializeValidateDict(cls): """ cls.validateDict.pop('Sampler', None) cls.validateDict.pop('Optimizer', None) - #the possible inputs - cls.validateDict['Input'].append(cls.testDict.copy()) - cls.validateDict['Input' ][-1]['class' ] = 'Databases' - cls.validateDict['Input' ][-1]['type' ] = ['HDF5'] - cls.validateDict['Input' ][-1]['required' ] = False - cls.validateDict['Input' ][-1]['multiplicity'] = 'n' - ## datasets + ## Possible Input Datasets dataObjects = cls.validateDict['Input'][0] dataObjects['type'].append('DataSet') # Cross validations will accept Model.ROM @@ -93,17 +87,17 @@ def specializeValidateDict(cls): cls.validateDict['Input' ][-1]['required' ] = False cls.validateDict['Input' ][-1]['multiplicity'] = 'n' #the possible outputs + cls.validateDict['Output'] = [] + cls.validateDict['Output'].append(cls.testDict.copy()) + cls.validateDict['Output' ][0]['class' ] = 'DataObjects' + cls.validateDict['Output' ][0]['type' ] = ['PointSet','HistorySet','DataSet'] + cls.validateDict['Output' ][0]['required' ] = True + cls.validateDict['Output' ][0]['multiplicity'] = 'n' cls.validateDict['Output'].append(cls.testDict.copy()) - cls.validateDict['Output' ][-1]['class' ] = 'Files' - cls.validateDict['Output' ][-1]['type' ] = [''] - cls.validateDict['Output' ][-1]['required' ] = False - cls.validateDict['Output' ][-1]['multiplicity'] = 'n' - # The possible functions - cls.validateDict['Function'] = [cls.testDict.copy()] - cls.validateDict['Function' ][0]['class' ] = 'Functions' - cls.validateDict['Function' ][0]['type' ] = ['External','Internal'] - cls.validateDict['Function' ][0]['required' ] = False - cls.validateDict['Function' ][0]['multiplicity'] = 1 + cls.validateDict['Output' ][1]['class' ] = 'OutStreams' + cls.validateDict['Output' ][1]['type' ] = ['Plot','Print'] + cls.validateDict['Output' ][1]['required' ] = False + cls.validateDict['Output' ][1]['multiplicity'] = 'n' def __init__(self ): """ diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index b974d20d88..02b42aeba4 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -130,9 +130,7 @@ def __init__(self): self.acceptedCalcParam = self.scalarVals + self.vectorVals self.what = self.acceptedCalcParam # what needs to be computed... default...all self.methodsToRun = [] # if a function is present, its outcome name is here stored... 
if it matches one of the known outcomes, the pp is going to use the function to compute it - self.externalFunction = [] self.printTag = 'PostProcessor BASIC STATISTIC' - self.addAssemblerObject('Function', InputData.Quantity.zero_to_one) self.biased = False # biased statistics? self.pivotParameter = None # time-dependent statistics pivot parameter self.pivotValue = None # time-dependent statistics pivot parameter values diff --git a/tests/framework/PostProcessors/BasicStatistics/test_BasicStatistics.xml b/tests/framework/PostProcessors/BasicStatistics/test_BasicStatistics.xml index 9684a9d1ee..9494ac5ee7 100644 --- a/tests/framework/PostProcessors/BasicStatistics/test_BasicStatistics.xml +++ b/tests/framework/PostProcessors/BasicStatistics/test_BasicStatistics.xml @@ -191,12 +191,6 @@ - - - x01 - - - 4 @@ -239,7 +233,6 @@ PointSetPostProcTest - testFunctionComputeProbability autoCorrelationTest autoCorrelationTest_basicStatPP autoCorrelationTest_basicStatPP_dump From 84bb6b6321172d98de16703e97a7e9b52d0a6a1d Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 22 Apr 2021 17:44:41 -0600 Subject: [PATCH 22/51] update validateDict --- framework/Models/PostProcessor.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index ad10f6125e..e6444e8108 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -98,6 +98,12 @@ def specializeValidateDict(cls): cls.validateDict['Output' ][1]['type' ] = ['Plot','Print'] cls.validateDict['Output' ][1]['required' ] = False cls.validateDict['Output' ][1]['multiplicity'] = 'n' + ## Currently only used by ComparisonStatistics, we may not allow this option + cls.validateDict['Output'].append(cls.testDict.copy()) + cls.validateDict['Output' ][-1]['class' ] = 'Files' + cls.validateDict['Output' ][-1]['type' ] = [''] + cls.validateDict['Output' ][-1]['required' ] = False + cls.validateDict['Output' ][-1]['multiplicity'] = 'n' def __init__(self ): """ From 6f5e2f737a69afd43d8908c7e42b7fc5858b77f7 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 22 Apr 2021 20:55:25 -0600 Subject: [PATCH 23/51] resolve comments --- framework/Models/PostProcessor.py | 23 +++-- .../Models/PostProcessors/DataClassifier.py | 25 +++-- framework/Models/PostProcessors/ETImporter.py | 11 ++- framework/Models/PostProcessors/FTImporter.py | 11 ++- .../PostProcessors/RiskMeasuresDiscrete.py | 16 ++- .../PostProcessorPluginBase.py | 98 ++++++++++--------- .../test_riskMeasuresDiscrete.xml | 8 +- .../test_riskMeasuresDiscreteMultipleIE.xml | 8 +- .../RiskMeasuresDiscrete/test_riskMonitor.xml | 8 +- 9 files changed, 124 insertions(+), 84 deletions(-) diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py index e6444e8108..a63c16661f 100644 --- a/framework/Models/PostProcessor.py +++ b/framework/Models/PostProcessor.py @@ -171,12 +171,13 @@ def createNewInput(self, myInput, samplerType, **kwargs): (Not used but required by model base class) @ In, myInput, list, the inputs (list) to start from to generate the new one @ In, samplerType, string, passing through (consistent with base class but not used) - @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler, - a mandatory key is the sampledVars'that contains a dictionary {'name variable':value} + @ In, **kwargs, dict, is a dictionary that contains the information passed by "Step". + Currently not used by PostProcessor. 
It can be useful by Step to control the input + and output of the PostProcessor, as well as other control options for the PostProcessor @ Out, myInput, list, the inputs (list) to start from to generate the new one """ - if 'createNewInput' in dir(self._pp): - myInput = self._pp.createNewInput(myInput,samplerType,**kwargs) + if 'createPostProcessorInput' in dir(self._pp): + myInput = self._pp.createPostProcessorInput(myInput, **kwargs) return myInput @Parallel() @@ -186,8 +187,9 @@ def evaluateSample(self, myInput, samplerType, kwargs): are needed by createNewInput and thus descriptions are copied from there. @ In, myInput, list, the inputs (list) to start from to generate the new one @ In, samplerType, string, passing through (consistent with base class but not used) - @ In, kwargs, dict, is a dictionary that contains the information coming from the sampler, - a mandatory key is the sampledVars'that contains a dictionary {'name variable':value} + @ In, **kwargs, dict, is a dictionary that contains the information passed by "Step". + Currently not used by PostProcessor. It can be useful by Step to control the input + and output of the PostProcessor, as well as other control options for the PostProcessor @ Out, returnValue, tuple, This will hold two pieces of information, the first item will be the input data used to generate this sample, the second item will be the output of this model given the specified @@ -199,7 +201,7 @@ def evaluateSample(self, myInput, samplerType, kwargs): returnValue = (ppInput, self._pp.run(ppInput)) return returnValue - def submit(self,myInput,samplerType,jobHandler,**kwargs): + def submit(self, myInput, samplerType, jobHandler, **kwargs): """ This will submit an individual sample to be evaluated by this model to a specified jobHandler. Note, some parameters are needed by createNewInput @@ -207,12 +209,13 @@ def submit(self,myInput,samplerType,jobHandler,**kwargs): @ In, myInput, list, the inputs (list) to start from to generate the new one @ In, samplerType, string, passing through (consistent with base class but not used) @ In, jobHandler, JobHandler instance, the global job handler instance - @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler, - a mandatory key is the sampledVars'that contains a dictionary {'name variable':value} + @ In, **kwargs, dict, is a dictionary that contains the information passed by "Step". + Currently not used by PostProcessor. It can be useful by Step to control the input + and output of the PostProcessor, as well as other control options for the PostProcessor @ Out, None """ kwargs['forceThreads'] = True - super().submit(myInput, samplerType, jobHandler,**kwargs) + super().submit(myInput, samplerType, jobHandler, **kwargs) def collectOutput(self, finishedJob, output): """ diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index e5b0b3d633..5fe09d6280 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -100,20 +100,26 @@ def _handleInput(self, paramInput): elif child.getName() == 'label': self.label = child.value.strip() - def identifyInputs(self, currentInput): + def identifyInputs(self, inputData): """ - Method to identify the inputs for classifier and target, respectively - @ In, currentInput, list, a list of dictionaries - @ Out, newInput, dict, dictionary of identified inputs + Method to identify the type (i.e., 'classifier' or 'target') of input data. 
+ If the input data contains 'label' and required 'variables' (provided by XML input file), + the input data will assign type 'classifier', otherwise 'target' + Please check 'PluginsBaseClasses.PostProcessorPluginBase' for the detailed descriptions + about 'inputData' and the output 'newInput'. + @ In, inputData, dict, dictionary contains the input data and input files, i.e., + {'Data':[DataObjects.asDataset('dict')], 'Files':[FileObject]} + @ Out, newInput, dict, dictionary of identified inputs, i.e., + {'classifier':DataObjects.asDataset('dict'), 'target':DataObjects.asDataset('dict')} """ - if isinstance(currentInput,list) and len(currentInput) != 2: + currentInput = inputData['Data'] + if len(currentInput) != 2: self.raiseAnError(IOError, "Required two inputs for PostProcessor {}, but got {}".format(self.name, len(currentInput))) newInput ={'classifier':{}, 'target':{}} haveClassifier = False haveTarget = False requiredKeys = list(self.mapping.keys()) + [self.label] for inputDict in currentInput: - print(inputDict['type']) if inputDict['type'] not in ['PointSet', 'HistorySet']: self.raiseAnError(IOError, "The input for this postprocesor", self.name, "is not acceptable! Allowed inputs are 'PointSet' and 'HistorySet'.") dataType = None @@ -139,8 +145,11 @@ def identifyInputs(self, currentInput): def run(self, inputIn): """ This method executes the postprocessor action. - @ In, inputIn, list, list of input dictionaries - @ Out, outputDict, dict, dictionary of outputs + @ In, inputIn, dict, dictionary contains the input data and input files, i.e., + {'Data':[DataObjects.asDataset('dict')], 'Files':[FileObject]}, only 'Data' + will be used by this PostProcessor + @ Out, outputDict, dict, dictionary of outputs, i.e., + {'data':dict of realizations, 'dim':{varName:independent dimensions that the variable depends on}} """ inputDict = self.identifyInputs(inputIn) targetDict = inputDict['target'] diff --git a/framework/Models/PostProcessors/ETImporter.py b/framework/Models/PostProcessors/ETImporter.py index 4e5a81f22b..e36529bc74 100644 --- a/framework/Models/PostProcessors/ETImporter.py +++ b/framework/Models/PostProcessors/ETImporter.py @@ -74,13 +74,16 @@ def _handleInput(self, paramInput): expand = paramInput.findFirst('expand') self.expand = expand.value - def run(self, inputs): + def run(self, inputIn): """ This method executes the PostProcessor action. 
- @ In, inputs, list, list of file objects - @ Out, outputDict, dict, dictionary of outputs + @ In, inputIn, dict, dictionary contains the input data and input files, i.e., + {'Data':[DataObjects.asDataset('dict')], 'Files':[FileObject]}, only 'Files' + will be used by this PostProcessor + @ Out, outputDict, dict, dictionary of outputs, i.e., + {'data':dict of realizations, 'dim':{varName:independent dimensions that the variable depends on}} """ - eventTreeModel = ETStructure(self.expand, inputs) + eventTreeModel = ETStructure(self.expand, inputIn['Files']) outputDict, variables = eventTreeModel.returnDict() outputDict = {'data': outputDict, 'dims':{}} return outputDict diff --git a/framework/Models/PostProcessors/FTImporter.py b/framework/Models/PostProcessors/FTImporter.py index f180bf8127..41d466b8ac 100644 --- a/framework/Models/PostProcessors/FTImporter.py +++ b/framework/Models/PostProcessors/FTImporter.py @@ -70,13 +70,16 @@ def _handleInput(self, paramInput): topEventID = paramInput.findFirst('topEventID') self.topEventID = topEventID.value - def run(self, inputs): + def run(self, inputIn): """ This method executes the postprocessor action. - @ In, inputs, list, list of file objects - @ Out, outputDict, dict, dict containing the processed FT + @ In, inputIn, dict, dictionary contains the input data and input files, i.e., + {'Data':[DataObjects.asDataset('dict')], 'Files':[FileObject]}, only 'Files' + will be used by this PostProcessor + @ Out, outputDict, dict, dictionary of outputs, i.e., + {'data':dict of realizations, 'dim':{varName:independent dimensions that the variable depends on}} """ - faultTreeModel = FTStructure(inputs, self.topEventID) + faultTreeModel = FTStructure(inputIn['Files'], self.topEventID) outputDict = faultTreeModel.returnDict() outputDict = {'data': outputDict, 'dims':{}} return outputDict diff --git a/framework/Models/PostProcessors/RiskMeasuresDiscrete.py b/framework/Models/PostProcessors/RiskMeasuresDiscrete.py index 1d9c3db3bb..fff12f2d75 100644 --- a/framework/Models/PostProcessors/RiskMeasuresDiscrete.py +++ b/framework/Models/PostProcessors/RiskMeasuresDiscrete.py @@ -55,8 +55,6 @@ class cls. dataSub.addParam("freq", InputTypes.FloatType) inputSpecification.addSub(dataSub) inputSpecification.addSubSimple("temporalID", InputTypes.StringType) - #Should method be in super class? 
- inputSpecification.addSubSimple("method", contentType=InputTypes.StringType) return inputSpecification def __init__(self): @@ -160,17 +158,17 @@ def _handleInput(self, paramInput): elif child.getName() == 'temporalID': self.temporalID = child.value - elif child.getName() !='method': - self.raiseAnError(IOError, 'RiskMeasuresDiscrete Interfaced Post-Processor ' + str(self.name) + - ' : XML node ' + str(child) + ' is not recognized') - - def run(self,inputDic): + def run(self,inputIn): """ This method perform the actual calculation of the risk measures - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects - @ Out, outputDic, dict, dictionary which contains the risk measures + @ In, inputIn, dict, dictionary contains the input data and input files, i.e., + {'Data':[DataObjects.asDataset('dict')], 'Files':[FileObject]}, only 'Data' + will be used by this PostProcessor + @ Out, outputDic, dict, dictionary which contains the risk measures, i.e., + {'data':dict of realizations, 'dim':{varName:independent dimensions that the variable depends on}} """ # Check how many HistorySets (checkHSs) have been provided + inputDic = inputIn['Data'] checkHSs=0 for inp in inputDic: if inp['type'] == 'HistorySet': diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index ff49e0b8d1..2b48bbe38a 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -17,15 +17,10 @@ @author: wangc """ -#External Modules--------------------------------------------------------------- -import abc -#External Modules End----------------------------------------------------------- - #Internal Modules--------------------------------------------------------------- import Files from utils import InputData, InputTypes from DataObjects import DataObject -from Databases import Database from .PluginBase import PluginBase from Models.PostProcessors.PostProcessorInterface import PostProcessorInterface #Internal Modules End----------------------------------------------------------- @@ -35,49 +30,10 @@ class PostProcessorPluginBase(PostProcessorInterface, PluginBase): This class represents a specialized class from which each PostProcessor plugins must inherit from """ # List containing the methods that need to be checked in order to assess the - # validity of a certain plugin. This list needs to be populated by the derived class + # validity of a certain plugin. _methodsToCheck = ['getInputSpecification', '_handleInput', 'run'] entityType = 'PostProcessor' - ################################################## - # Methods for Internal Use - ################################################## - def createNewInput(self,inputObjs,samplerType,**kwargs): - """ - This function is used to convert internal DataObjects to user-friendly format of data. - The output from this function will be directly passed to the "run" method. - @ In, inputObjs, list, list of DataObjects - @ In, samplerType, string, is the type of sampler that is calling to generate a new input. - Not used for PostProcessor, and "None" is used during "Step" "PostProcess" handling - @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler, - a mandatory key is the sampledVars'that contains a dictionary {'name variable':value}. 
- Not used for PostProcessor, and {'SampledVars':{'prefix':'None'}, 'additionalEdits':{}} - is used during "Step" "PostProcess" handling - @ Out, inputDs, list, list of data set that will be directly used by the "PostProcessor.run" method. - """ - #### TODO: This method probably need to move to PostProcessor Base Class when we have converted - #### all internal PostProcessors to use Dataset - - ## Type 1: DataObjects => Dataset - ## Type 2: File => File - ## Type 3: HDF5 => ? - assert type(inputObjs) == list - inputDs = [] - for inp in inputObjs: - if isinstance(inp, Files.File): - inputDs.append(inp) - elif isinstance(inp, DataObject.DataObject): - # Current accept two types: 1) 'dict', 2) 'xrDataset' - # Set default to 'dict', this is consistent with current post-processors - outType = kwargs.get('outType', 'dict') - inputDs.append(inp.asDataset(outType=outType)) - elif isinstance(inp, Database): - self.raiseAnError(IOError, "Database", inp.name, "can not be handled directly by this Post Processor") - else: - self.raiseAnError(IOError, "Unknown input is found", str(inp)) - return inputDs - - ################################################## # Plugin APIs ################################################## @@ -100,6 +56,26 @@ def __init__(self): @ Out, None """ super().__init__() + self._inputDataType = 'dict' # Current accept two types: 1) 'dict', 2) 'xrDataset' + # Set default to 'dict', this is consistent with current post-processors + + def setInputDataType(self, dataType='dict'): + """ + Method to set the input data type that will be passed to "run" method + @ In, dataType, str, the data type to which the internal DataObjects will be converted + @ Out, None + """ + if dataType not in ['dict', 'xrDataset']: + self.raiseAnError(IOError, 'The dataType "{}" is not supported, please consider using "dict" or "xrDataset"'.format(dataType)) + self._inputDataType = dataType + + def getInputDataType(self): + """ + Method to retrieve the input data type to which the internal DataObjects will be converted + @ In, None + @ Out, _inputDataType, str, the data type, i.e., 'dict', 'xrDataset' + """ + return self._inputDataType def initialize(self, runInfo, inputs, initDict=None): """ @@ -125,5 +101,35 @@ def _handleInput(self, paramInput): # """ # This method executes the postprocessor action. # @ In, inputDs, list, list of Datasets - # @ Out, outputDs, dict, xarray.Dataset, pd.DataFrame + # @ Out, outputDs, dict, xarray.Dataset # """ + + ################################################## + # Methods for Internal Use + ################################################## + + def createPostProcessorInput(self, inputObjs, **kwargs): + """ + This function is used to convert internal DataObjects to user-friendly format of data. + The output from this function will be directly passed to the "run" method. + @ In, inputObjs, list, list of DataObjects + @ In, **kwargs, dict, is a dictionary that contains the information passed by "Step". + Currently not used by PostProcessor. It can be useful by Step to control the input + and output of the PostProcessor, as well as other control options for the PostProcessor + @ Out, inputDict, list, list of data set that will be directly used by the "PostProcessor.run" method. 
+ """ + #### TODO: This method probably need to move to PostProcessor Base Class when we have converted + #### all internal PostProcessors to use Dataset + ## Type 1: DataObjects => Dataset or Dict + ## Type 2: File => File + assert type(inputObjs) == list + inputDict = {'Data':[], 'Files':[]} + for inp in inputObjs: + if isinstance(inp, Files.File): + inputDict['Files'].append(inp) + elif isinstance(inp, DataObject.DataObject): + dataType = self.getInputDataType() + inputDict['Data'].append(inp.asDataset(outType=dataType)) + else: + self.raiseAnError(IOError, "Unknown input is found", str(inp)) + return inputDict diff --git a/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml index 7193251031..5bc8701726 100644 --- a/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscrete.xml @@ -8,6 +8,13 @@ Tests of the four risk importance measures: Risk Achievement Worth (RAW), Risk Reduction Worth (RRW), Birnbaum (B) and Fussell-Vesely (FV) + + + Move 'RiskMeasuresDiscrete' from InterfacedPostProcessor to Plugin-Type PostProcessor. + The subType of this PostProcessor will be changed to 'RiskMeasuresDiscrete', + and the 'method' node under this PostProcessor is no longer needed. + + @@ -25,7 +32,6 @@ pump1Time,pump2Time,valveTime,Tmax,outcome,pump1State,pump2State,valveState,failureTime - riskMeasuresDiscrete B,FV,RAW,RRW pump1Time pump2Time diff --git a/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml index 27bb65809a..425dcdd748 100644 --- a/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMeasuresDiscreteMultipleIE.xml @@ -8,6 +8,13 @@ Tests of the four risk importance measures for multiple IEs: Risk Achievement Worth (RAW), Risk Reduction Worth (RRW), Birnbaum (B) and Fussell-Vesely (FV) + + + Move 'RiskMeasuresDiscrete' from InterfacedPostProcessor to Plugin-Type PostProcessor. + The subType of this PostProcessor will be changed to 'RiskMeasuresDiscrete', + and the 'method' node under this PostProcessor is no longer needed. + + @@ -24,7 +31,6 @@ Bstatus,Cstatus,Dstatus,outcome - riskMeasuresDiscrete B,FV,RAW,RRW Astatus Bstatus diff --git a/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml index a47bc9527b..75e96c88b8 100644 --- a/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml +++ b/tests/framework/PostProcessors/RiskMeasuresDiscrete/test_riskMonitor.xml @@ -9,6 +9,13 @@ Tests of the four risk importance measures for time dependent data: Risk Achievement Worth (RAW), Risk Reduction Worth (RRW), Birnbaum (B) and Fussell-Vesely (FV) + + + Move 'RiskMeasuresDiscrete' from InterfacedPostProcessor to Plugin-Type PostProcessor. + The subType of this PostProcessor will be changed to 'RiskMeasuresDiscrete', + and the 'method' node under this PostProcessor is no longer needed. 
+    </revisions>
+  </TestInfo>
@@ -26,7 +33,6 @@
       Astatus,Bstatus,Cstatus,outcome
-      riskMeasuresDiscrete
       B,FV,RAW,RRW,R0
       Astatus
       Bstatus
       Cstatus
       outcome
       outRun1
       time
-
@@ -233,7 +233,7 @@
-
+
       Astatus,Bstatus,Cstatus
@@ -261,10 +261,10 @@
       x0
       time , R0 ,
       Astatus_FV , Bstatus_FV , Cstatus_FV ,
-      Astatus_RAW , Bstatus_RAW , Cstatus_RAW ,
+      Astatus_RAW , Bstatus_RAW , Cstatus_RAW ,
       Astatus_RRW , Bstatus_RRW , Cstatus_RRW
-
+

From 58245a486fa0a6dd921a04c9aefc8b5d0fa983d9 Mon Sep 17 00:00:00 2001
From: "Wang, Congjian"
Date: Thu, 22 Apr 2021 21:40:14 -0600
Subject: [PATCH 24/51] fix MCSimporter

---
 framework/Models/PostProcessor.py              | 2 +-
 framework/Models/PostProcessors/MCSimporter.py | 9 ++++++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/framework/Models/PostProcessor.py b/framework/Models/PostProcessor.py
index a63c16661f..31dd16fa29 100644
--- a/framework/Models/PostProcessor.py
+++ b/framework/Models/PostProcessor.py
@@ -91,7 +91,7 @@ def specializeValidateDict(cls):
     cls.validateDict['Output'].append(cls.testDict.copy())
     cls.validateDict['Output' ][0]['class'       ] = 'DataObjects'
     cls.validateDict['Output' ][0]['type'        ] = ['PointSet','HistorySet','DataSet']
-    cls.validateDict['Output' ][0]['required'    ] = True
+    cls.validateDict['Output' ][0]['required'    ] = False
     cls.validateDict['Output' ][0]['multiplicity'] = 'n'
     cls.validateDict['Output'].append(cls.testDict.copy())
     cls.validateDict['Output' ][1]['class'       ] = 'OutStreams'
diff --git a/framework/Models/PostProcessors/MCSimporter.py b/framework/Models/PostProcessors/MCSimporter.py
index f67166addc..8c3a6b1189 100644
--- a/framework/Models/PostProcessors/MCSimporter.py
+++ b/framework/Models/PostProcessors/MCSimporter.py
@@ -81,10 +81,13 @@ def _handleInput(self, paramInput):
-  def run(self, inputs):
+  def run(self, inputIn):
     """
       This method executes the PostProcessor action.
-      @ In, inputs, list, list of file objects
-      @ Out, None
+      @ In, inputIn, dict, dictionary contains the input data and input files, i.e.,
+        {'Data':[DataObjects.asDataset('dict')], 'Files':[FileObject]}, only 'Files'
+        will be used by this PostProcessor
+      @ Out, mcsPointSet, dict, dictionary of outputs, i.e.,
+        {'data':dict of realizations, 'dim':{}}
     """
-
+    inputs = inputIn['Files']
     mcsFileFound = False
     beFileFound  = False

From 42a665315a853620f59c9229ea483a800a933884 Mon Sep 17 00:00:00 2001
From: "Wang, Congjian"
Date: Fri, 23 Apr 2021 08:53:01 -0600
Subject: [PATCH 25/51] split PostProcessor document into separate files

---
 doc/user_manual/Makefile                     |   4 +-
 .../{ => PostProcessors}/DataMining.tex      |   0
 .../InterfacedPostProcessors.tex             | 593 +++++++++++++
 .../PostProcessors/RavenOutput.tex           | 161 ++++
 doc/user_manual/postprocessor.tex            | 814 +-----------------
 .../Models/PostProcessors/MCSimporter.py     |   2 +-
 6 files changed, 788 insertions(+), 786 deletions(-)
 create mode 100644 doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
 create mode 100644 doc/user_manual/PostProcessors/RavenOutput.tex

diff --git a/doc/user_manual/Makefile b/doc/user_manual/Makefile
index 8bb0b00774..9ce9b39d63 100644
--- a/doc/user_manual/Makefile
+++ b/doc/user_manual/Makefile
@@ -1,8 +1,8 @@
 SRCFILE = raven_user_manual
-# MANUAL_FILES = optimizer.tex rom.tex postprocessor.tex database_data.tex OutStreamSystem.tex sampler.tex existing_interfaces.tex ProbabilityDistributions.tex step.tex functions.tex ravenStructure.tex Summary.tex introduction.tex raven_user_manual.tex model.tex runInfo.tex libraries.tex DataMining.tex HowToRun.tex metrics.tex
 MANUAL_FILES = generated/optimizer.tex rom.tex kerasROM.tex postprocessor.tex database_data.tex OutStreamSystem.tex sampler.tex variablegroups.tex \
 existing_interfaces.tex ProbabilityDistributions.tex step.tex functions.tex ravenStructure.tex Summary.tex \
-introduction.tex raven_user_manual.tex model.tex runInfo.tex libraries.tex DataMining.tex HowToRun.tex metrics.tex \
+introduction.tex raven_user_manual.tex model.tex runInfo.tex libraries.tex PostProcessors/DataMining.tex \
+PostProcessors/InterfacedPostProcessors.tex HowToRun.tex metrics.tex \
 Installation/clone.tex Installation/conda.tex Installation/linux.tex Installation/macosx.tex Installation/main.tex \
 Installation/overview.tex Installation/windows.tex advanced_users_templates.tex
 LATEX_FLAGS=-interaction=nonstopmode
diff --git a/doc/user_manual/DataMining.tex b/doc/user_manual/PostProcessors/DataMining.tex
similarity index 100%
rename from doc/user_manual/DataMining.tex
rename to doc/user_manual/PostProcessors/DataMining.tex
diff --git a/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex b/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
new file mode 100644
index 0000000000..081a33c285
--- /dev/null
+++ b/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
@@ -0,0 +1,593 @@
+\subsubsection{Interfaced}
+\label{Interfaced}
+The \textbf{Interfaced} post-processor is a Post-Processor that allows the user
+to create their own Post-Processors. While the External Post-Processor (see
+Section~\ref{External}) allows the user to create case-dependent
+Post-Processors, with this class the user can create new general
+purpose Post-Processors.
+%
+
+\ppType{Interfaced}{Interfaced}
+
+\begin{itemize}
+  \item \xmlNode{method}, \xmlDesc{string, required field},
+    the name of the method (i.e., the class implementing the post-processing
+    logic) that will be executed. All available methods need to be included
+    in the ``/raven/framework/PostProcessorFunctions/'' folder
+\end{itemize}
+
+\textbf{Example:}
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='interfacedPP' subType='InterfacedPostProcessor'>
+      <method>testInterfacedPP</method>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+
+All the \textbf{Interfaced} post-processors need to be contained in the
+``/raven/framework/PostProcessorFunctions/'' folder. In fact, once the
+\textbf{Interfaced} post-processor is defined in the RAVEN input file, RAVEN
+checks that the method of the post-processor is located in that folder.
+
+The class specified in the \textbf{Interfaced} post-processor has to inherit the
+PostProcessorInterfaceBase class, and the user must specify this set of
+methods:
+\begin{itemize}
+  \item initialize: in this method, the internal parameters of the
+    post-processor are initialized. Mandatory variables that need to be
+    specified are the following:
+\begin{itemize}
+  \item self.inputFormat: type of dataObject expected in input
+  \item self.outputFormat: type of dataObject generated in output
+\end{itemize}
+  \item readMoreXML: this method is in charge of reading the PostProcessor XML
+    node, parsing it, and filling the PostProcessor internal variables.
+  \item run: this method performs the desired computation of the dataObject.
+\end{itemize}
+
+\begin{lstlisting}[language=python]
+from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase
+class testInterfacedPP(PostProcessorInterfaceBase):
+  def initialize(self)
+  def readMoreXML(self,xmlNode)
+  def run(self,inputDic)
+\end{lstlisting}
+
+\paragraph{Data Format}
+The user is not allowed to directly modify the DataObjects; however, the
+content of the DataObjects is available in the form of a python dictionary.
+Both the dictionary given in input and the one generated as output of the
+PostProcessor are structured as follows:
+
+\begin{lstlisting}[language=python]
+inputDict = {'data':{}, 'metadata':{}}
+\end{lstlisting}
+
+where:
+
+\begin{lstlisting}[language=python]
+inputDict['data'] = {'input':{}, 'output':{}}
+\end{lstlisting}
+
+In the input dictionary, each input variable is listed as a dictionary that
+contains a numpy array with its own values, as shown below for a simplified
+example:
+
+\begin{lstlisting}[language=python]
+inputDict['data']['input'] = {'inputVar1': array([ 1.,2.,3.]),
+                              'inputVar2': array([4.,5.,6.])}
+\end{lstlisting}
+
+Similarly, if the dataObject is a PointSet, then the output dictionary is
+structured as follows:
+
+\begin{lstlisting}[language=python]
+inputDict['data']['output'] = {'outputVar1': array([ .1,.2,.3]),
+                               'outputVar2':array([.4,.5,.6])}
+\end{lstlisting}
+
+However, if the dataObject is a HistorySet, then the output dictionary is
+structured as follows:
+
+\begin{lstlisting}[language=python]
+inputDict['data']['output'] = {'hist1': {}, 'hist2':{}}
+\end{lstlisting}
+
+where
+
+\begin{lstlisting}[language=python]
+inputDict['data']['output']['hist1'] = {'time': array([ .1,.2,.3]),
+                                        'outputVar1':array([ .4,.5,.6])}
+inputDict['data']['output']['hist2'] = {'time': array([ .1,.2,.3]),
+                                        'outputVar1':array([ .14,.15,.16])}
+\end{lstlisting}
+
+
+\paragraph{Method: HStoPSOperator}
+
+This Post-Processor performs the conversion from HistorySet to PointSet by performing a projection of the output space.
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are available:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable. Default is ``time''.
+    \nb Used just in case the \xmlNode{pivotValue}-based operation is requested
+  \item \xmlNode{operator}, \xmlDesc{string, optional field}, the operation to perform on the output space:
+  \begin{itemize}
+    \item \textbf{min}, compute the minimum of each variable along each single history
+    \item \textbf{max}, compute the maximum of each variable along each single history
+    \item \textbf{average}, compute the average of each variable along each single history
+    \item \textbf{all}, join together all of the values of each variable in
+    the history, and make the pivotParameter a regular
+    parameter. Unlike the min and max operators, this keeps
+    all the data, just organized differently. This operator
+    does this by propagating the other input parameters for
+    each item of the pivotParameter.
+    Table~\ref{operator_all_switch_before} shows an example
+    HistorySet with input parameter x, pivot parameter t, and
+    output parameter b, and then
+    Table~\ref{operator_all_switch_after} shows the resulting
+    PointSet with input parameters x and t, and output
+    parameter b. Note that which parameters are input and which
+    are output in the resulting PointSet depends on the
+    DataObject specification.
+  \end{itemize}
+  \nb This node can be inputted only if \xmlNode{pivotValue} and \xmlNode{row} are not present
+  \item \xmlNode{pivotValue}, \xmlDesc{float, optional field}, the value of the pivotParameter at which the other outputs need to be extracted.
+    \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{row} are not present
+  \item \xmlNode{pivotStrategy}, \xmlDesc{string, optional field}, the strategy to use for the pivotValue:
+  \begin{itemize}
+    \item \textbf{nearest}, find the value that is the nearest with respect to the \xmlNode{pivotValue}
+    \item \textbf{floor}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but less than the \xmlNode{pivotValue}
+    \item \textbf{ceiling}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but greater than the \xmlNode{pivotValue}
+    \item \textbf{interpolate}, if the exact \xmlNode{pivotValue} cannot be found, interpolate using a linear approach
+  \end{itemize}
+
+  \nb Valid just in case \xmlNode{pivotValue} is present
+  \item \xmlNode{row}, \xmlDesc{int, optional field}, the row index at which the outputs need to be extracted.
+    \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{pivotValue} are not present
+\end{itemize}
+
+This example shows how the XML input block would look:
+
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='HStoPSOperatorRow' subType='InterfacedPostProcessor'>
+      <method>HStoPSOperator</method>
+      <row>-1</row>
+    </PostProcessor>
+    <PostProcessor name='HStoPSOperatorPivot' subType='InterfacedPostProcessor'>
+      <method>HStoPSOperator</method>
+      <pivotParameter>time</pivotParameter>
+      <pivotValue>0.3</pivotValue>
+    </PostProcessor>
+    <PostProcessor name='HStoPSOperatorMax' subType='InterfacedPostProcessor'>
+      <method>HStoPSOperator</method>
+      <pivotParameter>time</pivotParameter>
+      <operator>max</operator>
+    </PostProcessor>
+    <PostProcessor name='HStoPSOperatorMin' subType='InterfacedPostProcessor'>
+      <method>HStoPSOperator</method>
+      <pivotParameter>time</pivotParameter>
+      <operator>min</operator>
+    </PostProcessor>
+    <PostProcessor name='HStoPSOperatorAverage' subType='InterfacedPostProcessor'>
+      <method>HStoPSOperator</method>
+      <pivotParameter>time</pivotParameter>
+      <operator>average</operator>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+
+\begin{table}[!hbtp]
+  \caption{Starting HistorySet for operator all}
+  \label{operator_all_switch_before}
+\begin{tabular}{l|l|l}
+  x & t & b \\
+  \hline
+  5.0 & & \\
+  \hline
+  & 1.0 & 6.0 \\
+  \hline
+  & 2.0 & 7.0 \\
+\end{tabular}
+\end{table}
+
+\begin{table}[!hbtp]
+  \caption{Resulting PointSet after operator all}
+  \label{operator_all_switch_after}
+\begin{tabular}{l|l|l}
+  x & t & b \\
+  \hline
+  5.0 & 1.0 & 6.0 \\
+  \hline
+  5.0 & 2.0 & 7.0 \\
+\end{tabular}
+\end{table}
+
+\paragraph{Method: HistorySetSampling}
+This Post-Processor performs the conversion from HistorySet to HistorySet.
+The conversion is made so that each history H is re-sampled according to a
+specific sampling strategy.
+It can be used to reduce the amount of space required by the HistorySet.
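+For instance, a minimal configuration could be sketched as follows (the
+PostProcessor name and the node values here are illustrative only; the
+available sub-nodes are described in detail below):
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<PostProcessor name='historySampler' subType='InterfacedPostProcessor'>
+  <method>HistorySetSampling</method>
+  <samplingType>uniform</samplingType>
+  <numberOfSamples>100</numberOfSamples>
+  <pivotParameter>time</pivotParameter>
+  <interpolation>linear</interpolation>
+</PostProcessor>
+\end{lstlisting}
+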
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{samplingType}, \xmlDesc{string, required field}, specifies the type of sampling method to be used:
+  \begin{itemize}
+    \item uniform: the set of \xmlNode{numberOfSamples} samples are uniformly distributed along the time axis
+    \item firstDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with
+    higher first order derivative
+    \item secondDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with
+    higher second order derivative
+    \item filteredFirstDerivative: samples are located where the first derivative is greater than the specified \xmlNode{tolerance} value
+    (hence, the number of samples can vary from history to history)
+    \item filteredSecondDerivative: samples are located where the second derivative is greater than the specified \xmlNode{tolerance} value
+    (hence, the number of samples can vary from history to history)
+  \end{itemize}
+  \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, number of samples (required only for the following sampling
+  types: uniform, firstDerivative, secondDerivative)
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable
+  \item \xmlNode{interpolation}, \xmlDesc{string, optional field}, type of interpolation to be employed for the history reconstruction
+  (required only for the following sampling types: uniform, firstDerivative, secondDerivative).
+  Valid types of interpolation to be specified: linear, nearest, zero, slinear, quadratic, cubic, intervalAverage
+  \item \xmlNode{tolerance}, \xmlDesc{string, optional field}, tolerance level (required only for the following sampling types:
+  filteredFirstDerivative or filteredSecondDerivative)
+\end{itemize}
+
+\paragraph{Method: HistorySetSync}
+This Post-Processor performs the conversion from HistorySet to HistorySet.
+The conversion is made so that all histories are synchronized in time.
+It can be used to allow the histories to be sampled at the same time instant.
+
+There are two possible synchronization methods, specified through the \xmlNode{syncMethod} node. If the
+\xmlNode{syncMethod} is \xmlString{grid}, a \xmlNode{numberOfSamples} node is specified,
+which yields an equally-spaced grid of time points. The output values for these points will be linearly interpolated
+using the nearest sampled time points, and the new HistorySet will contain only the new grid points.
+
+The other methods are used by specifying \xmlNode{syncMethod} as \xmlString{all}, \xmlString{min}, or
+\xmlString{max}. For \xmlString{all}, the postprocessor will iterate through the
+existing histories, collect all the time points used in any of them, and use these as the new grid on which to
+establish histories, retaining all the exact original values and interpolating linearly where necessary.
+In the event of \xmlString{min} or \xmlString{max}, the postprocessor will find the smallest or largest time
+history, respectively, and use those time values as nodes to interpolate between.
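+For instance, a grid-based synchronization could be configured as in the
+following sketch (the name and the node values are illustrative only; the
+sub-nodes are detailed below):
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<PostProcessor name='historySync' subType='InterfacedPostProcessor'>
+  <method>HistorySetSync</method>
+  <pivotParameter>time</pivotParameter>
+  <extension>zeroed</extension>
+  <syncMethod>grid</syncMethod>
+  <numberOfSamples>50</numberOfSamples>
+</PostProcessor>
+\end{lstlisting}
+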
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable
+  \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended)
+  \item \xmlNode{syncMethod}, \xmlDesc{string, required field}, synchronization strategy to employ (see
+  description above). Options are \xmlString{grid}, \xmlString{all}, \xmlString{max}, \xmlString{min}.
+  \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, required if \xmlNode{syncMethod} is
+  \xmlString{grid}, number of new time samples
+\end{itemize}
+
+\paragraph{Method: HistorySetSnapShot}
+This Post-Processor performs a conversion from HistorySet to PointSet.
+The conversion is made so that each history $H$ is converted to a single point $P$.
+There are several methods that can be employed to choose the single point from the history:
+\begin{itemize}
+  \item min: Take a time slice when the \xmlNode{pivotVar} is at its smallest value,
+  \item max: Take a time slice when the \xmlNode{pivotVar} is at its largest value,
+  \item average: Take a time slice when the \xmlNode{pivotVar} is at its time-weighted average value,
+  \item value: Take a time slice when the \xmlNode{pivotVar} \emph{first passes} its specified value,
+  \item timeSlice: Take a time slice index from the sampled time instance space.
+\end{itemize}
+To demonstrate the timeSlice, assume that each history H is a dict of n output variables $x_1=[...],
+x_n=[...]$, then the resulting point P is at time instant index t: $P=[x_1[t],...,x_n[t]]$.
+
+Choosing one of these methods for the \xmlNode{type} node will take a time slice for all the variables in the
+output space based on the provided parameters. Alternatively, a \xmlString{mixed} type can be used, in which
+each output variable can use a different time slice parameter. In other words, you can take the max of one
+variable while taking the minimum of another, etc.
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{type}, \xmlDesc{string, required field}, type of operation: \xmlString{min}, \xmlString{max},
+  \xmlString{average}, \xmlString{value}, \xmlString{timeSlice}, or \xmlString{mixed}
+  \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended)
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, name of the temporal variable. Required for the
+  \xmlString{average} and \xmlString{timeSlice} methods.
+\end{itemize}
+
+If a \xmlString{timeSlice} type is in use, the following nodes are also required:
+\begin{itemize}
+  \item \xmlNode{timeInstant}, \xmlDesc{integer, required field}, required and only used in the
+  \xmlString{timeSlice} type. Location of the time slice (integer index)
+        the location (integer index) of the time slice
+  \item \xmlNode{numberOfSamples}, \xmlDesc{integer, required field}, number of samples
+\end{itemize}
+
+If instead a \xmlString{min}, \xmlString{max}, \xmlString{average}, or \xmlString{value} type is used, the following nodes
+are also required:
+\begin{itemize}
+  \item \xmlNode{pivotVar}, \xmlDesc{string, required field}, name of the chosen indexing variable (the
+        variable whose min, max, average, or value is used to determine the time slice)
+  \item \xmlNode{pivotVal}, \xmlDesc{float, optional field}, required for the \xmlString{value} type, the value for the chosen variable
+\end{itemize}
+
+Lastly, if a \xmlString{mixed} approach is used, the following nodes apply:
+\begin{itemize}
+  \item \xmlNode{max}, \xmlDesc{string, optional field}, the names of variables whose output should be their
+        own maximum value within the history.
+  \item \xmlNode{min}, \xmlDesc{string, optional field}, the names of variables whose output should be their
+        own minimum value within the history.
+  \item \xmlNode{average}, \xmlDesc{string, optional field}, the names of variables whose output should be their
+        own average value within the history. Note that a \xmlNode{pivotParameter} node is required to perform averages.
+  \item \xmlNode{value}, \xmlDesc{string, optional field}, the names of variables whose output should be taken
+        at a time slice determined by another variable. As with the non-mixed \xmlString{value} type, the first
+        time the \xmlAttr{pivotVar} crosses the specified \xmlAttr{pivotVal} will be the time slice taken.
+        If used, this node requires two attributes:
+        \begin{itemize}
+          \item \xmlAttr{pivotVar}, \xmlDesc{string, required field}, the name of the variable on which the time
+                slice will be performed. That is, if we want the value of $y$ when $t=0.245$,
+                this attribute would be \xmlString{t}.
+          \item \xmlAttr{pivotVal}, \xmlDesc{float, required field}, the value of the \xmlAttr{pivotVar} on which the time
+                slice will be performed. That is, if we want the value of $y$ when $t=0.245$,
+                this attribute would be \xmlString{0.245}.
+        \end{itemize}
+        Note that all the outputs of the \xmlNode{DataObject} output of this postprocessor must be listed under one
+        of the \xmlString{mixed} node types in order for values to be returned.
+\end{itemize}
+
+\textbf{Example (mixed):}
+This example will output the average value of $x$ for $x$, the value of $y$ at
+time$=0.245$ for $y$, and the value of $z$ at $x=4.0$ for $z$.
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='snapShot' subType='InterfacedPostProcessor'>
+      <method>HistorySetSnapShot</method>
+      <type>mixed</type>
+      <average>x</average>
+      <value pivotVar='time' pivotVal='0.245'>y</value>
+      <value pivotVar='x' pivotVal='4.0'>z</value>
+      <pivotParameter>time</pivotParameter>
+      <extension>zeroed</extension>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+
+
+\paragraph{Method: HS2PS}
+
+This Post-Processor performs a conversion from HistorySet to PointSet.
+The conversion is made so that each history $H$ is converted to a single point $P$.
+Assume that each history $H$ is a dict of $n$ output variables $x_1=[\ldots],\ldots,x_n=[\ldots]$; then the resulting point $P$ is $P=concat(x_1,\ldots,x_n)$.
+Note: it is assumed here that all histories have been synchronized, so that they share the same length, start
+point, and end point. If you are not sure, pre-process the original HistorySet with the \textbf{HistorySetSync}
+method described above.
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-node is available:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable
+\end{itemize}
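+
+A minimal input block for this method might look like the following sketch (hypothetical
+PostProcessor name, assuming the \xmlString{InterfacedPostProcessor} subtype):
+\begin{lstlisting}[style=XML,morekeywords={subType,name}]
+<PostProcessor name='hs2ps' subType='InterfacedPostProcessor'>
+  <method>HS2PS</method>
+  <pivotParameter>time</pivotParameter>
+</PostProcessor>
+\end{lstlisting}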
+
+\paragraph{Method: TypicalHistoryFromHistorySet}
+This Post-Processor performs a simplified version of the procedure of \cite{wilcox2008users} to form a ``typical'' time series
+from multiple time series. The input should be a HistorySet, with each history in the HistorySet synchronized. For a HistorySet
+that is not synchronized, use the Post-Processor method \textbf{HistorySetSync} to synchronize the data before running this method.
+
+Each history in the input HistorySet is first converted to multiple histories, each of which has the maximum time specified in
+\xmlNode{outputLen} (see below). Each converted history $H_i$ is divided into a set of subsequences $\{H_i^j\}$, and the division
+is guided by the \xmlNode{subseqLen} node specified in the input XML. The value of \xmlNode{subseqLen} should be a list of positive
+numbers that specify the length of each subsequence. If the number of subsequences for each history is more than the number of
+values given in \xmlNode{subseqLen}, the values in \xmlNode{subseqLen} are reused.
+
+For each variable $x$, the method first computes the empirical CDF (cumulative distribution function) by using all the data values
+of $x$ in the HistorySet. This CDF is termed the long-term CDF for $x$. Then, for each subsequence $H_i^j$, the method computes the
+empirical CDF by using all the data values of $x$ in $H_i^j$. This CDF is termed the subsequential CDF. For the first interval window
+(i.e., $j=1$), the method computes the Finkelstein-Schafer (FS) statistic \cite{finkelstein1971improved} between the long-term CDF
+and the subsequential CDF of $H_i^1$ for each $i$. The FS statistic is defined as follows:
+\begin{align*}
+FS & = \sum_x FS_x\\
+FS_x &= \frac{1}{N}\sum_{n=1}^N\delta_n
+\end{align*}
+where $N$ is the number of value readings in the empirical CDF and $\delta_n$ is the absolute difference between the long-term CDF
+and the subsequential CDF at value $x_n$. The subsequence $H_i^1$ with the minimal FS statistic is selected as the typical
+subsequence for the interval window $j=1$. This process repeats for $j=2,3,\dots$ until all subsequences have been processed.
+Then all the typical subsequences are concatenated to form a complete history.
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable
+        \default{Time}
+  \item \xmlNode{subseqLen}, \xmlDesc{integers, required field}, length of the divided subsequences (see above)
+  \item \xmlNode{outputLen}, \xmlDesc{integer, optional field}, maximum value of the temporal variable for the generated typical history
+        \default{Maximum value of the variable with name of \xmlNode{pivotParameter}}
+\end{itemize}
+
+For example, consider a history of data collected over three years in one-second increments,
+where the user wants a single \emph{typical year} extracted from the data.
+The user wants this data constructed by combining twelve equal \emph{typical month}
+segments. In this case, the parameter \xmlNode{outputLen} should be \texttt{31536000} (the number of seconds
+in a year), while the parameter \xmlNode{subseqLen} should be \texttt{2592000} (the number of seconds in a
+month). Using a value for \xmlNode{subseqLen} that is either much, much smaller than \xmlNode{outputLen} or
+of equal size to \xmlNode{outputLen} might have unexpected results. In general, we recommend using a
+\xmlNode{subseqLen} that is roughly an order of magnitude smaller than \xmlNode{outputLen}.
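+
+For this three-year/typical-month scenario, a sketch of the corresponding input block
+(hypothetical PostProcessor name, assuming the \xmlString{InterfacedPostProcessor} subtype)
+would be:
+\begin{lstlisting}[style=XML,morekeywords={subType,name}]
+<PostProcessor name='typicalYear' subType='InterfacedPostProcessor'>
+  <method>TypicalHistoryFromHistorySet</method>
+  <pivotParameter>Time</pivotParameter>
+  <subseqLen>2592000</subseqLen>
+  <outputLen>31536000</outputLen>
+</PostProcessor>
+\end{lstlisting}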
+
+\paragraph{Method: dataObjectLabelFilter}
+This Post-Processor allows the user to filter the portion of a dataObject, either PointSet or HistorySet, that matches a given clustering label.
+A clustering algorithm associates a unique cluster label to each element of the dataObject (PointSet or HistorySet).
+This cluster label is a natural number ranging from $0$ (or $1$, depending on the algorithm) to $N$, where $N$ is the number of obtained clusters.
+Recall that some clustering algorithms (e.g., K-Means) receive $N$ as input while others (e.g., Mean-Shift) determine $N$ after clustering has been performed.
+Thus, this Post-Processor is naturally employed after a data-mining clustering technique has been performed on a dataObject, so that each cluster
+can be analyzed separately.
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independently of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{label}, \xmlDesc{string, required field}, name of the clustering label
+  \item \xmlNode{clusterIDs}, \xmlDesc{integers, required field}, IDs of the selected clusters. Note that more than one ID can be provided as input
+\end{itemize}
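+
+As an illustration, a sketch of an input block that keeps only the elements of cluster $0$
+(hypothetical names for the PostProcessor and the label variable, assuming the
+\xmlString{InterfacedPostProcessor} subtype) is:
+\begin{lstlisting}[style=XML,morekeywords={subType,name}]
+<PostProcessor name='clusterFilter' subType='InterfacedPostProcessor'>
+  <method>dataObjectLabelFilter</method>
+  <label>labels</label>
+  <clusterIDs>0</clusterIDs>
+</PostProcessor>
+\end{lstlisting}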
+
+
+\paragraph{Method: Discrete Risk Measures}
+This Post-Processor calculates a series of risk importance measures from a PointSet. This calculation is performed for a set of input parameters given an output target.
+
+The user is required to provide the following information:
+\begin{itemize}
+  \item the set of input variables. For each variable the following need to be specified:
+  \begin{itemize}
+    \item the set of values that imply a reliability value equal to $1$ for the input variable
+    \item the set of values that imply a reliability value equal to $0$ for the input variable
+  \end{itemize}
+  \item the output target variable. For this variable, the values of the output target variable that define the desired outcome need to be specified.
+\end{itemize}
+
+The following variables are first determined for each input variable $i$:
+\begin{itemize}
+  \item $R_0$: Probability of the outcome of the output target variable (nominal value)
+  \item $R^{+}_i$: Probability of the outcome of the output target variable if the reliability of the input variable is equal to $0$
+  \item $R^{-}_i$: Probability of the outcome of the output target variable if the reliability of the input variable is equal to $1$
+\end{itemize}
+
+Available measures are:
+\begin{itemize}
+  \item Risk Achievement Worth (RAW): $RAW = R^{+}_i / R_0 $
+  \item Risk Reduction Worth (RRW): $RRW = R_0 / R^{-}_i$
+  \item Fussell-Vesely (FV): $FV = (R_0 - R^{-}_i) / R_0$
+  \item Birnbaum (B): $B = R^{+}_i - R^{-}_i$
+\end{itemize}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{measures}, \xmlDesc{string, required field}, desired risk importance measures that have to be computed (RRW, RAW, FV, B)
+  \item \xmlNode{variable}, \xmlDesc{string, required field}, ID of the input variable. One such node is provided for each input variable. This node also needs to contain these attributes:
+  \begin{itemize}
+    \item \xmlAttr{R0values}, \xmlDesc{float, required field}, interval of values (comma separated values) that implies a reliability value equal to $0$ for the input variable
+    \item \xmlAttr{R1values}, \xmlDesc{float, required field}, interval of values (comma separated values) that implies a reliability value equal to $1$ for the input variable
+  \end{itemize}
+  \item \xmlNode{target}, \xmlDesc{string, required field}, ID of the output variable. This node also needs to contain the attribute \xmlAttr{values}, \xmlDesc{string, required field}, interval of
+        values of the output target variable that defines the desired outcome
+\end{itemize}
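+
+As a quick numerical illustration, suppose (hypothetical values) that for an input variable
+$i$ one obtains $R_0 = 0.01$, $R^{+}_i = 0.05$, and $R^{-}_i = 0.002$. The measures defined
+above would then evaluate to
+\begin{align*}
+RAW &= 0.05 / 0.01 = 5.0 \\
+RRW &= 0.01 / 0.002 = 5.0 \\
+FV  &= (0.01 - 0.002) / 0.01 = 0.8 \\
+B   &= 0.05 - 0.002 = 0.048
+\end{align*}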
+
+\textbf{Example:}
+This example calculates all available risk importance measures for two input variables (i.e., pumpTime and valveTime)
+given an output target variable (i.e., Tmax).
+A value of the input variable pumpTime in the interval $[0,240]$ implies a reliability value of the input variable pumpTime equal to $0$.
+A value of the input variable valveTime in the interval $[0,60]$ implies a reliability value of the input variable valveTime equal to $0$.
+A value of the input variables valveTime and pumpTime in the interval $[1441,2880]$ implies a reliability value of the input variables equal to $1$.
+The desired outcome of the output variable Tmax occurs in the interval $[2200,2500]$.
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='riskMeasuresDiscrete' subType='InterfacedPostProcessor'>
+      <method>RiskMeasuresDiscrete</method>
+      <measures>B,FV,RAW,RRW</measures>
+      <variable R0values='0,240' R1values='1441,2880'>pumpTime</variable>
+      <variable R0values='0,60' R1values='1441,2880'>valveTime</variable>
+      <target values='2200,2500'>Tmax</target>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+
+This Post-Processor also allows the user to consider multiple datasets (a dataset for each initiating event) and to calculate the global risk importance measures.
+This can be performed by:
+\begin{itemize}
+  \item Including all datasets in the step
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Steps>
+    ...
+    <PostProcess name='PP'>
+      <Input class='DataObjects' type='PointSet'>outRun1</Input>
+      <Input class='DataObjects' type='PointSet'>outRun2</Input>
+      <Model class='Models' type='PostProcessor'>riskMeasuresDiscrete</Model>
+      <Output class='DataObjects' type='PointSet'>outPPS</Output>
+      <Output class='OutStreams' type='Print'>PrintPPS_dump</Output>
+    </PostProcess>
+    ...
+  </Steps>
+</Simulation>
+\end{lstlisting}
+  \item Adding in the Post-Processor the frequency of the initiating event associated with each dataset
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='riskMeasuresDiscrete' subType='InterfacedPostProcessor'>
+      <method>riskMeasuresDiscrete</method>
+      <measures>FV,RAW</measures>
+      <variable R0values='...' R1values='...'>Astatus</variable>
+      <variable R0values='...' R1values='...'>Bstatus</variable>
+      <variable R0values='...' R1values='...'>Cstatus</variable>
+      <variable R0values='...' R1values='...'>Dstatus</variable>
+      <target values='...'>outcome</target>
+      <data freq='...'>outRun1</data>
+      <data freq='...'>outRun2</data>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+
+\end{itemize}
+
+This post-processor can be made time dependent if a single HistorySet is provided among the other data objects.
+The HistorySet contains the temporal profiles of a subset of the input variables. This temporal profile can only be
+boolean, i.e., 0 (component offline) or 1 (component online).
+Note that the provided HistorySet must contain a single History; multiple Histories are not allowed.
+When this post-processor is in a dynamic configuration (i.e., time-dependent), the user is required to specify an XML
+node \xmlNode{temporalID} that indicates the ID of the temporal variable.
+For each time instant, this post-processor determines the temporal profiles of the desired risk importance measures.
+Thus, in this case, a HistorySet must be chosen as an output data object.
+An example is shown below:
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='riskMeasuresDiscrete' subType='InterfacedPostProcessor'>
+      <method>riskMeasuresDiscrete</method>
+      <measures>B,FV,RAW,RRW,R0</measures>
+      <variable R0values='...' R1values='...'>Astatus</variable>
+      <variable R0values='...' R1values='...'>Bstatus</variable>
+      <variable R0values='...' R1values='...'>Cstatus</variable>
+      <target values='...'>outcome</target>
+      <data freq='...'>outRun1</data>
+      <temporalID>time</temporalID>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+  <Steps>
+    ...
+    <PostProcess name='timeDepPP'>
+      <Input class='DataObjects' type='PointSet'>outRun1</Input>
+      <Input class='DataObjects' type='HistorySet'>timeDepProfiles</Input>
+      <Model class='Models' type='PostProcessor'>riskMeasuresDiscrete</Model>
+      <Output class='DataObjects' type='HistorySet'>outHS</Output>
+      <Output class='OutStreams' type='Print'>PrintHS</Output>
+    </PostProcess>
+    ...
+  </Steps>
+  ...
+</Simulation>
+\end{lstlisting}
diff --git a/doc/user_manual/PostProcessors/RavenOutput.tex b/doc/user_manual/PostProcessors/RavenOutput.tex
new file mode 100644
index 0000000000..2eb805304e
--- /dev/null
+++ b/doc/user_manual/PostProcessors/RavenOutput.tex
@@ -0,0 +1,161 @@
+\subsubsection{RavenOutput}
+\label{RavenOutput}
+The \textbf{RavenOutput} post-processor is specifically used
+to gather data from RAVEN output files and generate a PointSet suitable for plotting or other analysis.
+It can do this in two modes: static and dynamic. In static mode, the
+PostProcessor reads from several static XML output files produced by RAVEN. In dynamic mode, the PostProcessor
+reads from a single dynamic XML output file and builds a PointSet where the pivot parameter (e.g. time) is the
+input and the requested values are returned for each of the pivot parameter values (e.g. points in time). The
+name for the pivot parameter will be taken directly from the XML structure.
+%
+Note: by default the PostProcessor operates in static mode; to read a dynamic file, the \xmlNode{dynamic} node must
+be specified.
+%
+\ppType{RavenOutput}{RavenOutput}
+%
+\begin{itemize}
+  \item \xmlNode{dynamic}, \xmlDesc{string, optional field}, if included will trigger reading a single dynamic
+    file instead of multiple static files, unless the text of this field is \xmlString{false}, in which case it
+    will return to the default (multiple static files). \default{False}
+  \item \xmlNode{File}, \xmlDesc{XML Node, required field}
+    %
+    For each file to be read by this postprocessor, an entry in the \xmlNode{Files} node must be added, and a
+    \xmlNode{File} node must be added to the postprocessor input block. The \xmlNode{File} requires two
+    identifying attributes:
+    \begin{itemize}
+      \item \xmlAttr{name}, \xmlDesc{string, required field}, the RAVEN-assigned name of the file,
+      \item \xmlAttr{ID}, \xmlDesc{float, optional field}, the floating point ID that will be unique to this
+        file. This will appear as an entry in the output \xmlNode{DataObject}, and the corresponding column will
+        contain the values extracted from this file. If not specified, RAVEN will attempt to find a suitable integer ID
+        to use, and a warning will be raised.
+
+        When defining the \xmlNode{DataObject} that this postprocessor will write to, and when using the static
+        (non-\xmlNode{dynamic}) form of the postprocessor, the \xmlNode{input} space should be given as
+        \xmlString{ID}, and the output variables should be the outputs specified in the postprocessor. See the
+        examples below. In the data object, the variable values will be keyed on the \xmlString{ID} parameter.
+    \end{itemize}
+    Each value that needs to be extracted from the file needs to be specified by one of the following
+    \xmlNode{output} nodes within the \xmlNode{File} node:
+    \begin{itemize}
+      \item \xmlNode{output}, \xmlDesc{|-separated string, required field},
+        the specification of the output to extract from the file.
+        RAVEN uses \texttt{xpath} as implemented in Python's \texttt{xml.etree} module to specify locations
+        in XML. For example, to search tags, use a path
+        separated by forward slash characters (``/''), starting under the root; this means the root node should not
+        be included in the path. See the example. For more details on the xpath options available, see
+        \url{https://docs.python.org/2/library/xml.etree.elementtree.html#xpath-support}.
+        %
+        The \xmlNode{output} node requires the following attribute:
+        \begin{itemize}
+          \item \xmlAttr{name}, \xmlDesc{string, required field}, specifies the entry in the Data Object that
+            this value should be stored under.
+        \end{itemize}
+
+    \end{itemize}
+    %
+\end{itemize}
+\textbf{Example (Static):}
+As an example, consider two input files, named \emph{in1.xml} and \emph{in2.xml}, which appear as
+follows. Note that the names of the variables we want change slightly between the two XML files; this is fine.
+
+\textbf{\emph{in1.xml}}
+\begin{lstlisting}[style=XML]
+<data>
+  <ans>
+    <val1>6</val1>
+    <val2>7</val2>
+  </ans>
+</data>
+\end{lstlisting}
+\textbf{\emph{in2.xml}}
+\begin{lstlisting}[style=XML]
+<data>
+  <ans>
+    <first>6.1</first>
+    <second>7.1</second>
+  </ans>
+</data>
+\end{lstlisting}
+
+The RAVEN input to extract this information would appear as follows.
+We include an example of defining the \xmlNode{DataObject} that this postprocessor will write out to, for
+further clarity.
+
+\begin{lstlisting}[style=XML]
+<Simulation>
+  ...
+  <Files>
+    <Input name='inFile1'>in1.xml</Input>
+    <Input name='inFile2'>in2.xml</Input>
+  </Files>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='readOutputs' subType='RavenOutput'>
+      <File name='inFile1' ID='1'>
+        <output name='first'>ans/val1</output>
+        <output name='second'>ans/val2</output>
+      </File>
+      <File name='inFile2' ID='2'>
+        <output name='first'>ans/first</output>
+        <output name='second'>ans/second</output>
+      </File>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+  <DataObjects>
+    ...
+    <PointSet name='collected'>
+      <Input>ID</Input>
+      <Output>first,second</Output>
+    </PointSet>
+    ...
+  </DataObjects>
+  ...
+</Simulation>
+\end{lstlisting}
+
+\textbf{Example (Dynamic):}
+For a dynamic example, consider the following time evolution of values. \emph{inFile.xml} is a RAVEN dynamic
+XML output.
+
+\textbf{\emph{inFile.xml}}
+\begin{lstlisting}[style=XML]
+
+\end{lstlisting}
+The RAVEN input to extract this information would appear as follows:
+\begin{lstlisting}[style=XML]
+<Simulation>
+  ...
+  <Files>
+    <Input name='inFile'>inFile.xml</Input>
+  </Files>
+  ...
+  <Models>
+    ...
+    <PostProcessor name='readDynamic' subType='RavenOutput'>
+      <dynamic>true</dynamic>
+      <File name='inFile' ID='1'>
+        <output name='first'>ans|val1</output>
+      </File>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+The resulting PointSet has \emph{time} as an input and \emph{first} as an output.
diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex
index 7eafcccf61..5ae9d3eb59 100644
--- a/doc/user_manual/postprocessor.tex
+++ b/doc/user_manual/postprocessor.tex
@@ -118,7 +118,7 @@ \subsubsection{BasicStatistics}
 The matrix quantities available for request are:
 \begin{itemize}
   \item \textbf{sensitivity}: matrix of sensitivity coefficients, computed via linear
     regression method. (\nb The condition number is computed every time this quantity is requested.
 If it results
-   to be greater then $30$, a multicollinearity problem exists and the sensitivity coefficients
+   to be greater than $30$, a multicollinearity problem exists and the sensitivity coefficients
    might be incorrect and a Warning is spawned by the code)
   \item \textbf{covariance}: covariance matrix
   \item \textbf{pearson}: matrix of correlation coefficients
@@ -160,8 +160,8 @@ \subsubsection{BasicStatistics}
 \end{itemize}
 RAVEN will define a variable with name defined as: ``prefix for given \textbf{metric}'' + ``\_ste\_'' + ``parameter name'' to store
 standard error of given \textbf{metric} with respect to given parameter. This information will be stored in the DataObjects,
-  i.e. \textbf{PointSet} and \textbf{HistorySet}, and by default will be printed out in the ``CSV'' output files by the
-  \textbf{OutStreams}. Option node \xmlNode{what} can be used in the \textbf{OutStreams} to select the information that
+  i.e. \textbf{PointSet} and \textbf{HistorySet}, and by default will be printed out in the ``CSV'' output files by the
+  \textbf{OutStreams}. Option node \xmlNode{what} can be used in the \textbf{OutStreams} to select the information that
 the users want to print. In the case when the users want to store all the calculations results in general \textbf{DataSets},
 RAVEN will employ a variable with name defined as: ``\textbf{metric}'' + ``\_ste'' to store standard error with respect to all
 target parameters. An additional
@@ -543,7 +543,7 @@ \subsubsection{ImportanceRank}
       ``class'' the listed object is from, the only acceptable class for this post-processor
       is \xmlString{Distributions};
     \item \xmlAttr{type}, \xmlDesc{required string attribute}, is the type of distributions,
-      the only acceptable type is \xmlString{MultivariateNormal}
+      the only acceptable type is \xmlString{MultivariateNormal}
   \end{itemize}
 \end{itemize}
 %
@@ -815,14 +815,14 @@ \subsubsection{LimitSurfaceIntegral}
   \item \xmlNode{integralType}, \xmlDesc{string, optional field}, specifies the type of
     integrations that need to be used. Currently only MonteCarlo integration is available
     \default{MonteCarlo}
-  \item \xmlNode{computeBounds}, \xmlDesc{bool, optional field},
-    activates the computation of the bounding error of the limit
-    surface integral ( maximum error in the identification of the
-    limit surface location). If True, the bounding error is stored
+  \item \xmlNode{computeBounds}, \xmlDesc{bool, optional field},
+    activates the computation of the bounding error of the limit
+    surface integral ( maximum error in the identification of the
+    limit surface location). If True, the bounding error is stored
     in a variable named as \xmlNode{outputName} appending the suffix
-    ``\_err''. For example, if \xmlNode{outputName} is
-    ``EventProbability'', the bounding error will be stored as
-    ``EventProbability\_err'' (this variable name must be listed as
+    ``\_err''. For example, if \xmlNode{outputName} is
+    ``EventProbability'', the bounding error will be stored as
+    ``EventProbability\_err'' (this variable name must be listed as
     variable in the output DataObject). \default{False}
   \item \xmlNode{seed}, \xmlDesc{integer, optional field}, specifies the random number generator seed.
@@ -1084,7 +1084,7 @@ \subsubsection{TopologicalDecomposition} \end{lstlisting} %%%%% PP DataMining %%%%%%% -\input{DataMining.tex} +\input{PostProcessors/DataMining.tex} %%%%% PP PrintCSV %%%%%%% %\paragraph{PrintCSV} @@ -1097,784 +1097,32 @@ \subsubsection{TopologicalDecomposition} % %%%%% PP External %%%%%%% -\subsubsection{Interfaced} -\label{Interfaced} -The \textbf{Interfaced} post-processor is a Post-Processor that allows the user -to create its own Post-Processor. While the External Post-Processor (see -Section~\ref{External} allows the user to create case-dependent -Post-Processors, with this new class the user can create new general -purpose Post-Processors. -% - -\ppType{Interfaced}{Interfaced} - -\begin{itemize} - \item \xmlNode{method}, \xmlDesc{comma separated string, required field}, - lists the method names of a method that will be computed (each - returning a post-processing value). All available methods need to be included - in the ``/raven/framework/PostProcessorFunctions/'' folder -\end{itemize} - -\textbf{Example:} -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - testInterfacedPP - - - ... - - ... - -\end{lstlisting} - -All the \textbf{Interfaced} post-processors need to be contained in the -``/raven/framework/PostProcessorFunctions/'' folder. In fact, once the -\textbf{Interfaced} post-processor is defined in the RAVEN input file, RAVEN -search that the method of the post-processor is located in such folder. - -The class specified in the \textbf{Interfaced} post-processor has to inherit the -PostProcessorInterfaceBase class and the user must specify this set of -methods: -\begin{itemize} - \item initialize: in this method, the internal parameters of the - post-processor are initialized. Mandatory variables that needs to be - specified are the following: -\begin{itemize} - \item self.inputFormat: type of dataObject expected in input - \item self.outputFormat: type of dataObject generated in output -\end{itemize} - \item readMoreXML: this method is in charge of reading the PostProcessor xml - node, parse it and fill the PostProcessor internal variables. - \item run: this method performs the desired computation of the dataObject. -\end{itemize} - -\begin{lstlisting}[language=python] -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase -class testInterfacedPP(PostProcessorInterfaceBase): - def initialize(self) - def readMoreXML(self,xmlNode) - def run(self,inputDic) -\end{lstlisting} - -\paragraph{Data Format} -The user is not allowed to modify directly the DataObjects, however the -content of the DataObjects is available in the form of a python dictionary. 
-Both the dictionary give in input and the one generated in the output of the -PostProcessor are structured as follows: - -\begin{lstlisting}[language=python] -inputDict = {'data':{}, 'metadata':{}} -\end{lstlisting} - -where: - -\begin{lstlisting}[language=python] -inputDict['data'] = {'input':{}, 'output':{}} -\end{lstlisting} - -In the input dictonary, each input variable is listed as a dictionary that -contains a numpy array with its own values as shown below for a simplified -example - -\begin{lstlisting}[language=python] -inputDict['data']['input'] = {'inputVar1': array([ 1.,2.,3.]), - 'inputVar2': array([4.,5.,6.])} -\end{lstlisting} - -Similarly, if the dataObject is a PointSet then the output dictionary is -structured as follows: - -\begin{lstlisting}[language=python] -inputDict['data']['output'] = {'outputVar1': array([ .1,.2,.3]), - 'outputVar2':array([.4,.5,.6])} -\end{lstlisting} - -Howevers, if the dataObject is a HistorySet then the output dictionary is -structured as follows: - -\begin{lstlisting}[language=python] -inputDict['data']['output'] = {'hist1': {}, 'hist2':{}} -\end{lstlisting} - -where - -\begin{lstlisting}[language=python] -inputDict['output']['data'][hist1] = {'time': array([ .1,.2,.3]), - 'outputVar1':array([ .4,.5,.6])} -inputDict['output']['data'][hist2] = {'time': array([ .1,.2,.3]), - 'outputVar1':array([ .14,.15,.16])} -\end{lstlisting} - - -\paragraph{Method: HStoPSOperator} - -This Post-Processor performs the conversion from HistorySet to PointSet performing a projection of the output space. - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are available: - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable. Default is ``time''. - \nb Used just in case the \xmlNode{pivotValue}-based operation is requested - \item \xmlNode{operator}, \xmlDesc{string, optional field}, the operation to perform on the output space: - \begin{itemize} - \item \textbf{min}, compute the minimum of each variable along each single history - \item \textbf{max}, compute the maximum of each variable along each single history - \item \textbf{average}, compute the average of each variable along each single history - \item \textbf{all}, join together all of the each variable in - the history, and make the pivotParameter a regular - parameter. Unlike the min and max operators, this keeps - all the data, just organized differently. This operator - does this by propogating the other input parameters for - each item of the pivotParameter. - Table~\ref{operator_all_switch_before} shows an example - HistorySet with input parameter x, pivot parameter t, and - output parameter b and then - Table~\ref{operator_all_switch_after} shows the resulting - PointSet with input parameters x and t, and output - parameter b. Note that which parameters are input and which - are output in the resulting PointSet depends on the - DataObject specification. - \end{itemize} - \nb This node can be inputted only if \xmlNode{pivotValue} and \xmlNode{row} are not present - \item \xmlNode{pivotValue}, \xmlDesc{float, optional field}, the value of the pivotParameter with respect to the other outputs need to be extracted. 
- \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{row} are not present - \item \xmlNode{pivotStrategy}, \xmlDesc{string, optional field}, The strategy to use for the pivotValue: - \begin{itemize} - \item \textbf{nearest}, find the value that is the nearest with respect the \xmlNode{pivotValue} - \item \textbf{floor}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but less then the \xmlNode{pivotValue} - \item \textbf{celing}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but greater then the \xmlNode{pivotValue} - \item \textbf{interpolate}, if the exact \xmlNode{pivotValue} can not be found, interpolate using a linear approach - \end{itemize} - - \nb Valid just in case \xmlNode{pivotValue} is present - \item \xmlNode{row}, \xmlDesc{int, optional field}, the row index at which the outputs need to be extracted. - \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{pivotValue} are not present -\end{itemize} - -This example will show how the XML input block would look like: - -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - HStoPSOperator - -1 - - - HStoPSOperator - time - 0.3 - - - HStoPSOperator - time - max - - - HStoPSOperator - time - min - - - HStoPSOperator - time - average - - ... - - ... - -\end{lstlisting} - -\begin{table}[!hbtp] - \caption{Starting HistorySet for operator all} - \label{operator_all_switch_before} -\begin{tabular}{l|l|l} - x & t & b \\ - \hline - 5.0 & & \\ - \hline - & 1.0 & 6.0 \\ - \hline - & 2.0 & 7.0 \\ -\end{tabular} -\end{table} - -\begin{table}[!hbtp] - \caption{Resulting PointSet after operator all} - \label{operator_all_switch_after} -\begin{tabular}{l|l|l} - x & t & b \\ - \hline - 5.0 & 1.0 & 6.0 \\ - \hline - 5.0 & 2.0 & 7.0 \\ -\end{tabular} -\end{table} - -\paragraph{Method: HistorySetSampling} -This Post-Processor performs the conversion from HistorySet to HistorySet -The conversion is made so that each history H is re-sampled accordingly to a -specific sampling strategy. -It can be used to reduce the amount of space required by the HistorySet. 
- -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{samplingType}, \xmlDesc{string, required field}, specifies the type of sampling method to be used: - \begin{itemize} - \item uniform: the set of \xmlNode{numberOfSamples} samples are uniformly distributed along the time axis - \item firstDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with - higher first order derivative - \item secondDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with - higher second order derivative - \item filteredFirstDerivative: samples are located where the first derivative is greater than the specified \xmlNode{tolerance} value - (hence, the number of samples can vary from history to history) - \item filteredSecondDerivative: samples are located where the second derivative is greater than the specified \xmlNode{tolerance} value - (hence, the number of samples can vary from history to history) - \end{itemize} - \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, number of samples (required only for the following sampling - types: uniform, firstDerivative secondDerivative) - \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable - \item \xmlNode{interpolation}, \xmlDesc{string, optional field}, type of interpolation to be employed for the history reconstruction - (required only for the following sampling types: uniform, firstDerivative secondDerivative). - Valid types of interpolation to specified: linear, nearest, zero, slinear, quadratic, cubic, intervalAverage - \item \xmlNode{tolerance}, \xmlDesc{string, optional field}, tolerance level (required only for the following sampling types: - filteredFirstDerivative or filteredSecondDerivative) -\end{itemize} - -\paragraph{Method: HistorySetSync} -This Post-Processor performs the conversion from HistorySet to HistorySet -The conversion is made so that all histories are synchronized in time. -It can be used to allow the histories to be sampled at the same time instant. - -There are two possible synchronization methods, specified through the \xmlNode{syncMethod} node. If the -\xmlNode{syncMethod} is \xmlString{grid}, a \xmlNode{numberOfSamples} node is specified, -which yields an equally-spaced grid of time points. The output values for these points will be linearly derived -using nearest sampled time points, and the new HistorySet will contain only the new grid points. - -The other methods are used by specifying \xmlNode{syncMethod} as \xmlString{all}, \xmlString{min}, or -\xmlString{max}. For \xmlString{all}, the postprocessor will iterate through the -existing histories, collect all the time points used in any of them, and use these as the new grid on which to -establish histories, retaining all the exact original values and interpolating linearly where necessary. -In the event of \xmlString{min} or \xmlString{max}, the postprocessor will find the smallest or largest time -history, respectively, and use those time values as nodes to interpolate between. 
- -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable - \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended) - \item \xmlNode{syncMethod}, \xmlDesc{string, required field}, synchronization strategy to employ (see - description above). Options are \xmlString{grid}, \xmlString{all}, \xmlString{max}, \xmlString{min}. - \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, required if \xmlNode{syncMethod} is - \xmlString{grid}, number of new time samples -\end{itemize} - -\paragraph{Method: HistorySetSnapShot} -This Post-Processor performs a conversion from HistorySet to PointSet. -The conversion is made so that each history $H$ is converted to a single point $P$. -There are several methods that can be employed to choose the single point from the history: -\begin{itemize} - \item min: Take a time slice when the \xmlNode{pivotVar} is at its smallest value, - \item max: Take a time slice when the \xmlNode{pivotVar} is at its largest value, - \item average: Take a time slice when the \xmlNode{pivotVar} is at its time-weighted average value, - \item value: Take a time slice when the \xmlNode{pivotVar} \emph{first passes} its specified value, - \item timeSlice: Take a time slice index from the sampled time instance space. -\end{itemize} -To demonstrate the timeSlice, assume that each history H is a dict of n output variables $x_1=[...], -x_n=[...]$, then the resulting point P is at time instant index t: $P=[x_1[t],...,x_n[t]]$. - -Choosing one the these methods for the \xmlNode{type} node will take a time slice for all the variables in the -output space based on the provided parameters. Alternatively, a \xmlString{mixed} type can be used, in which -each output variable can use a different time slice parameter. In other words, you can take the max of one -variable while taking the minimum of another, etc. - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{type}, \xmlDesc{string, required field}, type of operation: \xmlString{min}, \xmlString{max}, - \xmlString{average}, \xmlString{value}, \xmlString{timeSlice}, or \xmlString{mixed} - \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended) - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, name of the temporal variable. Required for the - \xmlString{average} and \xmlString{timeSlice} methods. -\end{itemize} - -If a \xmlString{timeSlice} type is in use, the following nodes also are required: -\begin{itemize} - \item \xmlNode{timeInstant}, \xmlDesc{integer, required field}, required and only used in the - \xmlString{timeSlice} type. 
Location of the time slice (integer index) - \item \xmlNode{numberOfSamples}, \xmlDesc{integer, required field}, number of samples -\end{itemize} - -If instead a \xmlString{min}, \xmlString{max}, \xmlString{average}, or \xmlString{value} is used, the following nodes -are also required: -\begin{itemize} - \item \xmlNode{pivotVar}, \xmlDesc{string, required field}, Name of the chosen indexing variable (the - variable whose min, max, average, or value is used to determine the time slice) - \item \xmlNode{pivotVal}, \xmlDesc{float, optional field}, required for \xmlString{value} type, the value for the chosen variable -\end{itemize} - -Lastly, if a \xmlString{mixed} approach is used, the following nodes apply: -\begin{itemize} - \item \xmlNode{max}, \xmlDesc{string, optional field}, the names of variables whose output should be their - own maximum value within the history. - \item \xmlNode{min}, \xmlDesc{string, optional field}, the names of variables whose output should be their - own minimum value within the history. - \item \xmlNode{average}, \xmlDesc{string, optional field}, the names of variables whose output should be their - own average value within the history. Note that a \xmlNode{pivotParameter} node is required to perform averages. - \item \xmlNode{value}, \xmlDesc{string, optional field}, the names of variables whose output should be taken - at a time slice determined by another variable. As with the non-mixed \xmlString{value} type, the first - time the \xmlAttr{pivotVar} crosses the specified \xmlAttr{pivotVal} will be the time slice taken. - This node requires two attributes, if used: - \begin{itemize} - \item \xmlAttr{pivotVar}, \xmlDesc{string, required field}, the name of the variable on which the time - slice will be performed. That is, if we want the value of $y$ when $t=0.245$, - this attribute would be \xmlString{t}. - \item \xmlAttr{pivotVal}, \xmlDesc{float, required field}, the value of the \xmlAttr{pivotVar} on which the time - slice will be performed. That is, if we want the value of $y$ when $t=0.245$, - this attribute would be \xmlString{0.245}. - \end{itemize} - Note that all the outputs of the \xmlNode{DataObject} output of this postprocessor must be listed under one - of the \xmlString{mixed} node types in order for values to be returned. -\end{itemize} - -\textbf{Example (mixed):} -This example will output the average value of $x$ for $x$, the value of $y$ at -time$=0.245$ for $y$, and the value of $z$ at $x=4.0$ for $z$. -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - HistorySetSnapShot - mixed - x - y - z - time - zeroed - - ... - - ... - -\end{lstlisting} - - -\paragraph{Method: HS2PS} - -This Post-Processor performs a conversion from HistorySet to PointSet. -The conversion is made so that each history $H$ is converted to a single point $P$. -Assume that each history $H$ is a dict of $n$ output variables $x_1=[...],x_n=[...]$, then the resulting point $P$ is $P=concat(x_1,...,x_n)$. -Note: it is here assumed that all histories have been sync so that they have the same length, start point and end point. If you are not sure, do a pre-processing the the original history set. 
- -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified (min, max, avg and value case): - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable (only for avg) -\end{itemize} - -\paragraph{Method: TypicalHistoryFromHistorySet} -This Post-Processor performs a simplified procedure of \cite{wilcox2008users} to form a ``typical'' time series from multiple time series. The input should be a HistorySet, with each history in the HistorySet synchronized. For HistorySet that is not synchronized, use Post-Processor method \textbf{HistorySetSync} to synchronize the data before running this method. - -Each history in input HistorySet is first converted to multiple histories each has maximum time specified in \xmlNode{outputLen} (see below). Each converted history $H_i$ is divided into a set of subsequences $\{H_i^j\}$, and the division is guided by the \xmlNode{subseqLen} node specified in the input XML. The value of \xmlNode{subseqLen} should be a list of positive numbers that specify the length of each subsequence. If the number of subsequence for each history is more than the number of values given in \xmlNode{subseqLen}, the values in \xmlNode{subseqLen} would be reused. - -For each variable $x$, the method first computes the empirical CDF (cumulative density function) by using all the data values of $x$ in the HistorySet. This CDF is termed as long-term CDF for $x$. Then for each subsequence $H_i^j$, the method computes the empirical CDF by using all the data values of $x$ in $H_i^j$. This CDF is termed as subsequential CDF. For the first interval window (i.e., $j=1$), the method computes the Finkelstein-Schafer (FS) statistics \cite{finkelstein1971improved} between the long term CDF and the subsequential CDF of $H_i^1$ for each $i$. The FS statistics is defined as following. -\begin{align*} -FS & = \sum_x FS_x\\ -FS_x &= \frac{1}{N}\sum_{n=1}^N\delta_n -\end{align*} -where $N$ is the number of value reading in the empirical CDF and $\delta_n$ is the absolute difference between the long term CDF and the subsequential CDF at value $x_n$. The subsequence $H_i^1$ with minimal FS statistics will be selected as the typical subsequence for the interval window $j=1$. Such process repeats for $j=2,3,\dots$ until all subsequences have been processed. Then all the typical subsequences will be concatenated to form a complete history. - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable - \default{Time} - \item \xmlNode{subseqLen}, \xmlDesc{integers, required field}, length of the divided subsequence (see above) - \item \xmlNode{outputLen}, \xmlDesc{integer, optional field}, maximum value of the temporal variable for the generated typical history - \default{Maximum value of the variable with name of \xmlNode{pivotParameter}} -\end{itemize} - -For example, consider history of data collected over three years in one-second increments, -where the user wants a single \emph{typical year} extracted from the data. -The user wants this data constructed by combining twelve equal \emph{typical month} -segments. 
In this case, the parameter \xmlNode{outputLen} should be \texttt{31536000} (the number of seconds -in a year), while the parameter \xmlNode{subseqLen} should be \texttt{2592000} (the number of seconds in a -month). Using a value for \xmlNode{subseqLen} that is either much, much smaller than \xmlNode{outputLen} or -of equal size to \xmlNode{outputLen} might have unexpected results. In general, we recommend using a -\xmlNode{subseqLen} that is roughly an order of magnitude smaller than \xmlNode{outputLen}. - -\paragraph{Method: dataObjectLabelFilter} -This Post-Processor allows to filter the portion of a dataObject, either PointSet or HistorySet, with a given clustering label. -A clustering algorithm associates a unique cluster label to each element of the dataObject (PointSet or HistorySet). -This cluster label is a natural number ranging from $0$ (or $1$ depending on the algorithm) to $N$ where $N$ is the number of obtained clusters. -Recall that some clustering algorithms (e.g., K-Means) receive $N$ as input while others (e.g., Mean-Shift) determine $N$ after clustering has been performed. -Thus, this Post-Processor is naturally employed after a data-mining clustering techniques has been performed on a dataObject so that each clusters -can be analyzed separately. - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independently of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{label}, \xmlDesc{string, required field}, name of the clustering label - \item \xmlNode{clusterIDs}, \xmlDesc{integers, required field}, ID of the selected clusters. Note that more than one ID can be provided as input -\end{itemize} - - -\paragraph{Method: Discrete Risk Measures} -This Post-Processor calculates a series of risk importance measures from a PointSet. This calculation if performed for a set of input parameters given an output target. - -The user is required to provide the following information: -\begin{itemize} - \item the set of input variables. For each variable the following need to be specified: - \begin{itemize} - \item the set of values that imply a reliability value equal to $1$ for the input variable - \item the set of values that imply a reliability value equal to $0$ for the input variable - \end{itemize} - \item the output target variable. For this variable it is needed to specify the values of the output target variable that defines the desired outcome. 
-\end{itemize} - -The following variables are first determined for each input variable $i$: -\begin{itemize} - \item $R_0$ Probability of the outcome of the output target variable (nominal value) - \item $R^{+}_i$ Probability of the outcome of the output target variable if reliability of the input variable is equal to $0$ - \item $R^{-}_i$ Probability of the outcome of the output target variable if reliability of the input variable is equal to $1$ -\end{itemize} - -Available measures are: -\begin{itemize} - \item Risk Achievement Worth (RAW): $RAW = R^{+}_i / R_0 $ - \item Risk Achievement Worth (RRW): $RRW = R_0 / R^{-}_i$ - \item Fussell-Vesely (FV): $FV = (R_0 - R^{-}_i) / R_0$ - \item Birnbaum (B): $B = R^{+}_i - R^{-}_i$ -\end{itemize} - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{measures}, \xmlDesc{string, required field}, desired risk importance measures that have to be computed (RRW, RAW, FV, B) - \item \xmlNode{variable}, \xmlDesc{string, required field}, ID of the input variable. This node is provided for each input variable. This nodes needs to contain also these attributes: - \begin{itemize} - \item \xmlAttr{R0values}, \xmlDesc{float, required field}, interval of values (comma separated values) that implies a reliability value equal to $0$ for the input variable - \item \xmlAttr{R1values}, \xmlDesc{float, required field}, interval of values (comma separated values) that implies a reliability value equal to $1$ for the input variable - \end{itemize} - \item \xmlNode{target}, \xmlDesc{string, required field}, ID of the output variable. This nodes needs to contain also the attribute \xmlAttr{values}, \xmlDesc{string, required field}, interval of - values of the output target variable that defines the desired outcome -\end{itemize} - -\textbf{Example:} -This example shows an example where it is desired to calculate all available risk importance measures for two input variables (i.e., pumpTime and valveTime) -given an output target variable (i.e., Tmax). -A value of the input variable pumpTime in the interval $[0,240]$ implies a reliability value of the input variable pumpTime equal to $0$. -A value of the input variable valveTime in the interval $[0,60]$ implies a reliability value of the input variable valveTime equal to $0$. -A value of the input variables valveTime and pumpTime in the interval $[1441,2880]$ implies a reliability value of the input variables equal to $1$. -The desired outcome of the output variable Tmax occurs in the interval $[2200,2500]$. -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - RiskMeasuresDiscrete - B,FV,RAW,RRW - pumpTime - valveTime - Tmax - - ... - - ... - -\end{lstlisting} - -This Post-Processor allows the user to consider also multiple datasets (a data set for each initiating event) and calculate the global risk importance measures. -This can be performed by: -\begin{itemize} - \item Including all datasets in the step -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - outRun1 - outRun2 - riskMeasuresDiscrete - outPPS - PrintPPS_dump - - - ... - -\end{lstlisting} - \item Adding in the Post-processor the frequency of the initiating event associated to each dataset -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... 
- - riskMeasuresDiscrete - FV,RAW - Astatus - Bstatus - Cstatus - Dstatus - outcome - outRun1 - outRun2 - - ... - - ... - -\end{lstlisting} - -\end{itemize} - -This post-processor can be made time dependent if a single HistorySet is provided among the other data objects. -The HistorySet contains the temporal profiles of a subset of the input variables. This temporal profile can be only -boolean, i.e., 0 (component offline) or 1 (component online). -Note that the provided history set must contains a single History; multiple Histories are not allowed. -When this post-processor is in a dynamic configuration (i.e., time-dependent), the user is required to specify an xml -node \xmlNode{temporalID} that indicates the ID of the temporal variable. -For each time instant, this post-processor determines the temporal profiles of the desired risk importance measures. -Thus, in this case, an HistorySet must be chosen as an output data object. -An example is shown below: -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - riskMeasuresDiscrete - B,FV,RAW,RRW,R0 - Astatus - Bstatus - Cstatus - outcome - outRun1 - time - - ... - - ... - - ... - - outRun1 - timeDepProfiles - riskMeasuresDiscrete - outHS - PrintHS - - ... - - ... - -\end{lstlisting} +\input{PostProcessors/InterfacedPostProcessors.tex} % %%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%% RavenOutput PP %%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%% % +\input{PostProcessors/RavenOutput.tex} -\subsubsection{RavenOutput} -\label{RavenOutput} -The \textbf{RavenOutput} post-processor is specifically used -to gather data from RAVEN output files and generate a PointSet suitable for plotting or other analysis. -It can do this in two modes: static and dynamic. In static mode, the -PostProcessor reads from from several static XML output files produced by RAVEN. In dynamic mode, the PostProcessor -reads from a single dynamic XML output file and builds a PointSet where the pivot parameter (e.g. time) is the -input and the requested values are returned for each of the pivot parameter values (e.g. points in time). The -name for the pivot parameter will be taken directly from the XML structure. -% -Note: by default the PostProcessor operates in static mode; to read a dynamic file, the \xmlNode{dynamic} node must -be specified. -% -\ppType{RavenOutput}{RavenOutput} -% -\begin{itemize} - \item \xmlNode{dynamic}, \xmlDesc{string, optional field}, if included will trigger reading a single dynamic - file instead of multiple static files, unless the text of this field is \xmlString{false}, in which case it - will return to the default (multiple static files). \default(False) - \item \xmlNode{File}, \xmlDesc{XML Node, required field} - % - For each file to be read by this postprocessor, an entry in the \xmlNode{Files} node must be added, and a - \xmlNode{File} node must be added to the postprocessor input block. The \xmlNode{File} requires two - identifying attributes: - \begin{itemize} - \item \xmlAttr{name}, \xmlDesc{string, required field}, the RAVEN-assigned name of the file, - \item \xmlAttr{ID}, \xmlDesc{float, optional field}, the floating point ID that will be unique to this - file. This will appear as an entry in the output \xmlNode{DataObject} and the corresponding column are - the values extracted from this file. If not specified, RAVEN will attempt to find a suitable integer ID - to use, and a warning will be raised. 
- - When defining the \xmlNode{DataObject} that this postprocessor will write to, and when using the static - (non-\xmlNode{dynamic}) form of the postprocessor, the \xmlNode{input} space should be given as - \xmlString{ID}, and the output variables should be the outputs specified in the postprocessor. See the - examples below. In the data object, the variable values will be keyed on the \xmlString{ID} parameter. - \end{itemize} - Each value that needs to be extracted from the file needs to be specified by one of the following - \xmlNode{output} nodes within the \xmlNode{File} node: - \begin{itemize} - \item \xmlNode{output}, \xmlDesc{|-separated string, required field}, - the specification of the output to extract from the file. - RAVEN uses \texttt{xpath} as implemented in Python's \texttt{xml.etree} module to specify locations - in XML. For example, to search tags, use a path - separated by forward slash characters (``/''), starting under the root; this means the root node should not - be included in the path. See the example. For more details on xpath options available, see - \url{https://docs.python.org/2/library/xml.etree.elementtree.html#xpath-support}. - % - The \xmlNode{output} node requires the following attribute: - \begin{itemize} - \item \xmlAttr{name}, \xmlDesc{string, required field}, specifies the entry in the Data Object that - this value should be stored under. - \end{itemize} - - \end{itemize} - % -\end{itemize} -\textbf{Example (Static):} -Using an example, let us have two input files, named \emph{in1.xml} and \emph{in2.xml}. They appear as -follows. Note that the name of the variables we want changes slightly between the XML; this is fine. - -\textbf{\emph{in1.xml}} -\begin{lstlisting}[style=XML] - - - 6 - 7 - - -\end{lstlisting} -\textbf{\emph{in2.xml}} -\begin{lstlisting}[style=XML] - - - 6.1 - 7.1 - - -\end{lstlisting} - -The RAVEN input to extract this information would appear as follows. -We include an example of defining the \xmlNode{DataObject} that this postprocessor will write out to, for -further clarity. - -\begin{lstlisting}[style=XML] - - ... - - inp1.xml - inp2.xml - - ... - - ... - - - ans/val1 - ans/val2 - - - ans/first - ans/second - - - ... - - ... - - ... - - ID - first,second - - ... - - ... - -\end{lstlisting} - -\textbf{Example (Dynamic):} -For a dynamic example, consider this time-evolution of values example. \emph{inFile.xml} is a RAVEN dynamic -XML output. - -\textbf{\emph{in1.xml}} -\begin{lstlisting}[style=XML] - - -\end{lstlisting} -The RAVEN input to extract this information would appear as follows: -\begin{lstlisting}[style=XML] - - ... - - inFile.xml - - ... - - ... - - true - - ans|val1 - - - ... - - ... - -\end{lstlisting} -The resulting PointSet has \emph{time} as an input and \emph{first} as an output. %%%%%%%%%%%%%% ParetoFrontier PP %%%%%%%%%%%%%%%%%%% \subsubsection{ParetoFrontier} \label{ParetoFrontierPP} The \textbf{ParetoFrontier} post-processor is designed to identify the points lying on the Pareto Frontier in a multi-dimensional trade-space. -This post-processor receives as input a \textbf{DataObject} (a PointSet only) which contains all data points in the trade-space space and it +This post-processor receives as input a \textbf{DataObject} (a PointSet only) which contains all data points in the trade-space space and it returns the subset of points lying in the Pareto Frontier as a PointSet. 
-It is here assumed that each data point of the input PointSet is a realization of the system under consideration for a
+It is here assumed that each data point of the input PointSet is a realization of the system under consideration for a
 specific configuration to which corresponds several objective variables (e.g., cost and value).
 %
 \ppType{ParetoFrontier}{ParetoFrontier}
 %
 \begin{itemize}
-  \item \xmlNode{objective},\xmlDesc{string, required parameter}, ID of the objective variable that represents a dimension of the trade-space space.
+  \item \xmlNode{objective},\xmlDesc{string, required parameter}, ID of the objective variable that represents a dimension of the trade space.
    The \xmlNode{objective} node requires one identifying attribute:
    \begin{itemize}
      \item \xmlAttr{goal}, \xmlDesc{string, required field}, Goal of the objective variable characteristic: minimization (min) or maximization (max)
@@ -1883,9 +1131,9 @@ \subsubsection{ParetoFrontier}
   \end{itemize}
 \end{itemize}
 
-The following is an example where a set of realizations (the ``candidates'' PointSet) has been generated by changing two parameters
+The following is an example where a set of realizations (the ``candidates'' PointSet) has been generated by changing two parameters
 (var1 and var2) which produced two output variables: cost (which it is desired to be minimized) and value (which it is desired to be maximized).
-The \textbf{ParetoFrontier} post-processor takes the ``candidates'' PointSet and populates a Point similar in structure
+The \textbf{ParetoFrontier} post-processor takes the ``candidates'' PointSet and populates a PointSet similar in structure
 (the ``paretoPoints'' PointSet).
 
 \textbf{Example:}
@@ -1894,7 +1142,7 @@ \subsubsection{ParetoFrontier}
 \begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
       cost
       value
-      
+      
 
 
@@ -1904,7 +1152,7 @@ \subsubsection{ParetoFrontier}
       paretoPoints
-      
+      
 
       var1,var2
 
@@ -1926,7 +1174,7 @@ \subsubsection{ParetoFrontier}
 \subsubsection{MCSImporter}
 \label{MCSimporterPP}
 The \textbf{MCSImporter} post-processor has been designed to import Minimal Cut Sets (MCSs) into RAVEN.
-This post-processor reads a csv file which contain the list of MCSs and it save this list as a DataObject
+This post-processor reads a csv file which contains the list of MCSs and saves this list as a DataObject
 (i.e., a PointSet). The csv file is composed of three columns; the first contains the ID number of the MCS,
 the second one contains the MCS probability value, the third one lists all the Basic Events contained in the
 MCS.
@@ -1948,7 +1196,7 @@ \subsubsection{MCSImporter}
 \end{tabular}
 \end{table}
 
-The PointSet is structured to include all Basic Event, the MCS ID, the MCS probability, and the outcome of such MCS
+The PointSet is structured to include all Basic Events, the MCS ID, the MCS probability, and the outcome of such MCS
 (always set to 1). MCS ID and MCS probability are copied directly from the csv file.
For each MCS, the Basic Events can have two possible values: @@ -1979,7 +1227,7 @@ \subsubsection{MCSImporter} \begin{itemize} \item \xmlNode{expand},\xmlDesc{bool, required parameter}, expand the set of Basic Events by including all PRA Basic Events and not only the once listed in the MCSs - \item \xmlNode{BElistColumn},\xmlDesc{string, optional parameter}, if expand is set to True, then this node contains the + \item \xmlNode{BElistColumn},\xmlDesc{string, optional parameter}, if expand is set to True, then this node contains the column of the csv file which contains all the PRA Basic Events \end{itemize} @@ -1988,11 +1236,11 @@ \subsubsection{MCSImporter} MCSlist.csv - + False - + @@ -2002,7 +1250,7 @@ \subsubsection{MCSImporter} MCS_PS - + A,B,C,D,E @@ -2017,11 +1265,11 @@ \subsubsection{MCSImporter} MCSlist.csv BElist.csv - + False - + @@ -2032,7 +1280,7 @@ \subsubsection{MCSImporter} MCS_PS - + A,B,C,D,E,F,G @@ -2892,7 +2140,7 @@ \subsubsection{FastFourierTransform} \begin{itemize} \item \xmlNode{target}, \xmlDesc{comma separated strings, required field}, specifies the names of the target(s) for which the fast Fourier transform should be calculated. - \end{itemize} + \end{itemize} \textbf{Example:} \begin{lstlisting}[style=XML] diff --git a/framework/Models/PostProcessors/MCSimporter.py b/framework/Models/PostProcessors/MCSimporter.py index 8c3a6b1189..94ac9fb9c3 100644 --- a/framework/Models/PostProcessors/MCSimporter.py +++ b/framework/Models/PostProcessors/MCSimporter.py @@ -78,7 +78,7 @@ def _handleInput(self, paramInput): beListColumn = paramInput.findFirst('BElistColumn') self.beListColumn = beListColumn.value - def run(self, inputs): + def run(self, inputIn): """ This method executes the PostProcessor action. @ In, inputIn, dict, dictionary contains the input data and input files, i.e., From 25f343125be0c5d0c007cccb6c5055f4d9e37902 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 23 Apr 2021 09:20:43 -0600 Subject: [PATCH 26/51] update RiskMeasureDiscrete PP document --- doc/user_manual/Makefile | 2 +- .../PostProcessors/DiscreteRiskMeasures.tex | 165 ++++++++++++++++++ .../InterfacedPostProcessors.tex | 155 ---------------- doc/user_manual/model.tex | 2 +- doc/user_manual/postprocessor.tex | 12 +- 5 files changed, 176 insertions(+), 160 deletions(-) create mode 100644 doc/user_manual/PostProcessors/DiscreteRiskMeasures.tex diff --git a/doc/user_manual/Makefile b/doc/user_manual/Makefile index 9ce9b39d63..d56e498b4a 100644 --- a/doc/user_manual/Makefile +++ b/doc/user_manual/Makefile @@ -2,7 +2,7 @@ SRCFILE = raven_user_manual MANUAL_FILES = generated/optimizer.tex rom.tex kerasROM.tex postprocessor.tex database_data.tex OutStreamSystem.tex sampler.tex variablegroups.tex \ existing_interfaces.tex ProbabilityDistributions.tex step.tex functions.tex ravenStructure.tex Summary.tex \ introduction.tex raven_user_manual.tex model.tex runInfo.tex libraries.tex PostProcessors/DataMining.tex \ -PostProcessors/InterfacedPostProcessors.tex HowToRun.tex metrics.tex \ +PostProcessors/InterfacedPostProcessors.tex PostProcessors/DiscreteRiskMeasures.tex HowToRun.tex metrics.tex \ Installation/clone.tex Installation/conda.tex Installation/linux.tex Installation/macosx.tex Installation/main.tex \ Installation/overview.tex Installation/windows.tex advanced_users_templates.tex LATEX_FLAGS=-interaction=nonstopmode diff --git a/doc/user_manual/PostProcessors/DiscreteRiskMeasures.tex b/doc/user_manual/PostProcessors/DiscreteRiskMeasures.tex new file mode 100644 index 
0000000000..6ab2418f23
--- /dev/null
+++ b/doc/user_manual/PostProcessors/DiscreteRiskMeasures.tex
@@ -0,0 +1,165 @@
+\subsubsection{Discrete Risk Measures}
+\label{DiscreteRiskMeasures}
+This Post-Processor calculates a series of risk importance measures from a PointSet.
+This calculation is performed for a set of input parameters given an output target.
+
+The user is required to provide the following information:
+\begin{itemize}
+  \item the set of input variables. For each variable the following need to be specified:
+  \begin{itemize}
+    \item the set of values that imply a reliability value equal to $1$ for the input variable
+    \item the set of values that imply a reliability value equal to $0$ for the input variable
+  \end{itemize}
+  \item the output target variable. For this variable it is necessary to specify the values of
+        the output target variable that define the desired outcome.
+\end{itemize}
+
+The following variables are first determined for each input variable $i$:
+\begin{itemize}
+  \item $R_0$: probability of the outcome of the output target variable (nominal value)
+  \item $R^{+}_i$: probability of the outcome of the output target variable if the reliability of the input variable is equal to $0$
+  \item $R^{-}_i$: probability of the outcome of the output target variable if the reliability of the input variable is equal to $1$
+\end{itemize}
+
+Available measures are:
+\begin{itemize}
+  \item Risk Achievement Worth (RAW): $RAW = R^{+}_i / R_0 $
+  \item Risk Reduction Worth (RRW): $RRW = R_0 / R^{-}_i$
+  \item Fussell-Vesely (FV): $FV = (R_0 - R^{-}_i) / R_0$
+  \item Birnbaum (B): $B = R^{+}_i - R^{-}_i$
+\end{itemize}
+
+\ppType{RiskMeasureDiscrete}{RiskMeasureDiscrete}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{measures}, \xmlDesc{string, required field}, desired risk importance measures
+        that have to be computed (RRW, RAW, FV, B)
+  \item \xmlNode{variable}, \xmlDesc{string, required field}, ID of the input variable. This
+        node is provided for each input variable. This node also needs to contain these attributes:
+  \begin{itemize}
+    \item \xmlAttr{R0values}, \xmlDesc{float, required field}, interval of values (comma separated values)
+          that implies a reliability value equal to $0$ for the input variable
+    \item \xmlAttr{R1values}, \xmlDesc{float, required field}, interval of values (comma separated values)
+          that implies a reliability value equal to $1$ for the input variable
+  \end{itemize}
+  \item \xmlNode{target}, \xmlDesc{string, required field}, ID of the output variable. This node also needs to
+        contain the attribute \xmlAttr{values}, \xmlDesc{string, required field}, interval of
+        values of the output target variable that defines the desired outcome
+\end{itemize}
+
+\textbf{Example:}
+This example shows how to calculate all available risk importance
+measures for two input variables (i.e., pumpTime and valveTime)
+given an output target variable (i.e., Tmax).
+A value of the input variable pumpTime in the interval $[0,240]$ implies a reliability
+value of the input variable pumpTime equal to $0$.
+A value of the input variable valveTime in the interval $[0,60]$ implies a reliability
+value of the input variable valveTime equal to $0$.
+A value of the input variables valveTime and pumpTime in the interval $[1441,2880]$ implies a
+reliability value of the input variables equal to $1$.
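+
+As a numerical cross-check of the definitions above (the probability values
+below are illustrative placeholders only, not RAVEN output; the comments give
+the analytic results), the four measures reduce to a few lines of Python once
+$R_0$, $R^{+}_i$ and $R^{-}_i$ have been estimated:
+\begin{lstlisting}[language=Python]
+R0     = 0.01   # nominal outcome probability
+Rplus  = 0.05   # outcome probability with input reliability forced to 0
+Rminus = 0.002  # outcome probability with input reliability forced to 1
+
+RAW = Rplus / R0          # Risk Achievement Worth -> 5.0
+RRW = R0 / Rminus         # Risk Reduction Worth   -> 5.0
+FV  = (R0 - Rminus) / R0  # Fussell-Vesely         -> 0.8
+B   = Rplus - Rminus      # Birnbaum               -> 0.048
+\end{lstlisting}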
+The desired outcome of the output variable Tmax occurs in the interval $[2200,2500]$.
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+
+  ...
+
+    ...
+
+      B,FV,RAW,RRW
+      pumpTime
+      valveTime
+      Tmax
+
+    ...
+
+  ...
+
+\end{lstlisting}
+
+This Post-Processor also allows the user to consider multiple datasets (a data set for each initiating event)
+and to calculate the global risk importance measures.
+This can be performed by:
+\begin{itemize}
+  \item Including all datasets in the step
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+
+  ...
+
+    ...
+
+      outRun1
+      outRun2
+      riskMeasuresDiscrete
+      outPPS
+      PrintPPS_dump
+
+
+  ...
+
+\end{lstlisting}
+  \item Adding to the Post-Processor the frequency of the initiating event associated with each dataset
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+
+  ...
+
+    ...
+
+      FV,RAW
+      Astatus
+      Bstatus
+      Cstatus
+      Dstatus
+      outcome
+      outRun1
+      outRun2
+
+  ...
+
+  ...
+
+\end{lstlisting}
+
+\end{itemize}
+
+This post-processor can be made time dependent if a single HistorySet is provided among the other data objects.
+The HistorySet contains the temporal profiles of a subset of the input variables. These temporal profiles can only be
+boolean, i.e., 0 (component offline) or 1 (component online).
+Note that the provided HistorySet must contain a single History; multiple Histories are not allowed.
+When this post-processor is in a dynamic configuration (i.e., time-dependent), the user is required to specify an XML
+node \xmlNode{temporalID} that indicates the ID of the temporal variable.
+For each time instant, this post-processor determines the temporal profiles of the desired risk importance measures.
+Thus, in this case, a HistorySet must be chosen as the output data object.
+An example is shown below:
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+
+  ...
+
+    ...
+
+      B,FV,RAW,RRW,R0
+      Astatus
+      Bstatus
+      Cstatus
+      outcome
+      outRun1
+      time
+
+    ...
+
+  ...
+
+  ...
+
+    outRun1
+    timeDepProfiles
+    riskMeasuresDiscrete
+    outHS
+    PrintHS
+
+  ...
+
+  ...
+
+\end{lstlisting}
diff --git a/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex b/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
index 081a33c285..1756c45b71 100644
--- a/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
+++ b/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
@@ -436,158 +436,3 @@ \subsubsection{Interfaced}
   \item \xmlNode{label}, \xmlDesc{string, required field}, name of the clustering label
   \item \xmlNode{clusterIDs}, \xmlDesc{integers, required field}, ID of the selected clusters. Note that more than one ID can be provided as input
 \end{itemize}
-
-
-\paragraph{Method: Discrete Risk Measures}
-This Post-Processor calculates a series of risk importance measures from a PointSet. This calculation if performed for a set of input parameters given an output target.
-
-The user is required to provide the following information:
-\begin{itemize}
-  \item the set of input variables. For each variable the following need to be specified:
-  \begin{itemize}
-    \item the set of values that imply a reliability value equal to $1$ for the input variable
-    \item the set of values that imply a reliability value equal to $0$ for the input variable
-  \end{itemize}
-  \item the output target variable. For this variable it is needed to specify the values of the output target variable that defines the desired outcome.
-\end{itemize} - -The following variables are first determined for each input variable $i$: -\begin{itemize} - \item $R_0$ Probability of the outcome of the output target variable (nominal value) - \item $R^{+}_i$ Probability of the outcome of the output target variable if reliability of the input variable is equal to $0$ - \item $R^{-}_i$ Probability of the outcome of the output target variable if reliability of the input variable is equal to $1$ -\end{itemize} - -Available measures are: -\begin{itemize} - \item Risk Achievement Worth (RAW): $RAW = R^{+}_i / R_0 $ - \item Risk Achievement Worth (RRW): $RRW = R_0 / R^{-}_i$ - \item Fussell-Vesely (FV): $FV = (R_0 - R^{-}_i) / R_0$ - \item Birnbaum (B): $B = R^{+}_i - R^{-}_i$ -\end{itemize} - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{measures}, \xmlDesc{string, required field}, desired risk importance measures that have to be computed (RRW, RAW, FV, B) - \item \xmlNode{variable}, \xmlDesc{string, required field}, ID of the input variable. This node is provided for each input variable. This nodes needs to contain also these attributes: - \begin{itemize} - \item \xmlAttr{R0values}, \xmlDesc{float, required field}, interval of values (comma separated values) that implies a reliability value equal to $0$ for the input variable - \item \xmlAttr{R1values}, \xmlDesc{float, required field}, interval of values (comma separated values) that implies a reliability value equal to $1$ for the input variable - \end{itemize} - \item \xmlNode{target}, \xmlDesc{string, required field}, ID of the output variable. This nodes needs to contain also the attribute \xmlAttr{values}, \xmlDesc{string, required field}, interval of - values of the output target variable that defines the desired outcome -\end{itemize} - -\textbf{Example:} -This example shows an example where it is desired to calculate all available risk importance measures for two input variables (i.e., pumpTime and valveTime) -given an output target variable (i.e., Tmax). -A value of the input variable pumpTime in the interval $[0,240]$ implies a reliability value of the input variable pumpTime equal to $0$. -A value of the input variable valveTime in the interval $[0,60]$ implies a reliability value of the input variable valveTime equal to $0$. -A value of the input variables valveTime and pumpTime in the interval $[1441,2880]$ implies a reliability value of the input variables equal to $1$. -The desired outcome of the output variable Tmax occurs in the interval $[2200,2500]$. -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - RiskMeasuresDiscrete - B,FV,RAW,RRW - pumpTime - valveTime - Tmax - - ... - - ... - -\end{lstlisting} - -This Post-Processor allows the user to consider also multiple datasets (a data set for each initiating event) and calculate the global risk importance measures. -This can be performed by: -\begin{itemize} - \item Including all datasets in the step -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - outRun1 - outRun2 - riskMeasuresDiscrete - outPPS - PrintPPS_dump - - - ... - -\end{lstlisting} - \item Adding in the Post-processor the frequency of the initiating event associated to each dataset -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... 
- - riskMeasuresDiscrete - FV,RAW - Astatus - Bstatus - Cstatus - Dstatus - outcome - outRun1 - outRun2 - - ... - - ... - -\end{lstlisting} - -\end{itemize} - -This post-processor can be made time dependent if a single HistorySet is provided among the other data objects. -The HistorySet contains the temporal profiles of a subset of the input variables. This temporal profile can be only -boolean, i.e., 0 (component offline) or 1 (component online). -Note that the provided history set must contains a single History; multiple Histories are not allowed. -When this post-processor is in a dynamic configuration (i.e., time-dependent), the user is required to specify an xml -node \xmlNode{temporalID} that indicates the ID of the temporal variable. -For each time instant, this post-processor determines the temporal profiles of the desired risk importance measures. -Thus, in this case, an HistorySet must be chosen as an output data object. -An example is shown below: -\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}] - - ... - - ... - - riskMeasuresDiscrete - B,FV,RAW,RRW,R0 - Astatus - Bstatus - Cstatus - outcome - outRun1 - time - - ... - - ... - - ... - - outRun1 - timeDepProfiles - riskMeasuresDiscrete - outHS - PrintHS - - ... - - ... - -\end{lstlisting} diff --git a/doc/user_manual/model.tex b/doc/user_manual/model.tex index 75cc3c80d0..6d63b70440 100644 --- a/doc/user_manual/model.tex +++ b/doc/user_manual/model.tex @@ -74,7 +74,7 @@ \section{Models} In order to use the \textit{#1} PP, the user needs to set the \xmlAttr{subType} of a \xmlNode{PostProcessor} node: - \xmlNode{PostProcessor \xmlAttr{subType}=\xmlString{#2}/}. + \xmlNode{PostProcessor \xmlAttr{name}=\xmlString{ppName} \xmlAttr{subType}=\xmlString{#2}/}. Several sub-nodes are available: } diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex index 5ae9d3eb59..61cdea3adc 100644 --- a/doc/user_manual/postprocessor.tex +++ b/doc/user_manual/postprocessor.tex @@ -19,8 +19,9 @@ \subsection{PostProcessor} \item \textbf{LimitSurfaceIntegral} \item \textbf{External} \item \textbf{TopologicalDecomposition} - \item \textbf{RavenOutput} + %\item \textbf{RavenOutput} \item \textbf{DataMining} + \item \textbf{RiskMeasureDiscrete} \item \textbf{Metric} \item \textbf{CrossValidation} \item \textbf{DataClassifier} @@ -1096,6 +1097,9 @@ \subsubsection{TopologicalDecomposition} %TO BE MOVED TO STEP ``IOSTEP'' % +%%%%% Risk Measures Discrete PP %%%%%%%%%% +\input{PostProcessors/DiscreteRiskMeasures.tex} + %%%%% PP External %%%%%%% \input{PostProcessors/InterfacedPostProcessors.tex} @@ -1103,8 +1107,10 @@ \subsubsection{TopologicalDecomposition} %%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%% RavenOutput PP %%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%% -% -\input{PostProcessors/RavenOutput.tex} +% FIXME: I think this is not valid anymore. We do not support the RavenOutput PP anymore. +% In addition, all the related tests are disabled. 
It seems to me we should remove all +% related files and tests (wangc - 4/23/2021) +%\input{PostProcessors/RavenOutput.tex} %%%%%%%%%%%%%% ParetoFrontier PP %%%%%%%%%%%%%%%%%%% From 25f188fdfde41c1aa1ac09db024a697af1ecedd4 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 27 Apr 2021 22:06:14 -0600 Subject: [PATCH 27/51] convert HS2PS to use DataSet --- .../PostProcessorPluginBase.py | 5 +- framework/PostProcessorFunctions/HS2PS.py | 147 +++++++----------- .../InterfacedPostProcessor/test_HS2PS.xml | 5 +- 3 files changed, 60 insertions(+), 97 deletions(-) diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index 2b48bbe38a..393af5ad6c 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -129,7 +129,10 @@ def createPostProcessorInput(self, inputObjs, **kwargs): inputDict['Files'].append(inp) elif isinstance(inp, DataObject.DataObject): dataType = self.getInputDataType() - inputDict['Data'].append(inp.asDataset(outType=dataType)) + data = inp.asDataset(outType=dataType) + inpVars = inp.getVars('input') + outVars = inp.getVars('output') + inputDict['Data'].append((inpVars, outVars, data)) else: self.raiseAnError(IOError, "Unknown input is found", str(inp)) return inputDict diff --git a/framework/PostProcessorFunctions/HS2PS.py b/framework/PostProcessorFunctions/HS2PS.py index 010fd4a2ab..c654208ba1 100644 --- a/framework/PostProcessorFunctions/HS2PS.py +++ b/framework/PostProcessorFunctions/HS2PS.py @@ -12,22 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -#for future compatibility with Python 3-------------------------------------------------------------- -from __future__ import division, print_function, unicode_literals, absolute_import -#End compatibility block for Python 3---------------------------------------------------------------- - #External Modules------------------------------------------------------------------------------------ import os import copy -import itertools import numpy as np +import xarray as xr #External Modules End-------------------------------------------------------------------------------- -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP +#Internal Modules--------------------------------------------------------------- from utils import InputData, InputTypes +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +#Internal Modules End----------------------------------------------------------- - -class HS2PS(PostProcessorInterfaceBase): +class HS2PS(PostProcessorPluginBase): """ This Post-Processor performs the conversion from HistorySet to PointSet The conversion is made so that each history H is converted to a single point P. @@ -45,29 +42,41 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("HS2PS")) inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType)) inputSpecification.addSub(InputData.parameterInputFactory("features", contentType=InputTypes.StringListType)) - #Should method be in super class? 
-    inputSpecification.addSub(InputData.parameterInputFactory("method", contentType=InputTypes.StringType))
     return inputSpecification

-  def initialize(self):
+  def __init__(self):
     """
-      Method to initialize the Interfaced Post-processor
-      @ In, None,
-      @ Out, None,
-
+      Constructor
+      @ In, None
+      @ Out, None
     """
-    PostProcessorInterfaceBase.initialize(self)
-    self.inputFormat = 'HistorySet'
-    self.outputFormat = 'PointSet'
-
-    self.pivotParameter = None
-    #pivotParameter identify the ID of the temporal variable in the data set; it is used so that in the
-    #conversion the time array is not inserted since it is not needed (all histories have same length)
-    self.features = 'all'
-
+    super().__init__()
+    self.printTag = 'POSTPROCESSOR HS2PS'
+    self.validDataType = ['PointSet'] # The list of accepted types of DataObject
+    ## Currently, we have used both DataObject.addRealization and DataObject.load to
+    ## collect the PostProcessor returned outputs. DataObject.addRealization is used to
+    ## collect a single realization, while DataObject.load is used to collect multiple realizations.
+    ## However, DataObject.load cannot be directly used to collect a single realization.
+    self.outputMultipleRealizations = True
+    self.pivotParameter = None
+    self.features = 'all'
+    self.setInputDataType('xrDataset')
+
+  def initialize(self, runInfo, inputs, initDict=None):
+    """
+      Method to initialize the HS2PS post-processor.
+      @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
+      @ In, inputs, list, list of inputs
+      @ In, initDict, dict, optional, dictionary with initialization options
+      @ Out, None
+    """
+    super().initialize(runInfo, inputs, initDict)
+    if len(inputs)>1:
+      self.raiseAnError(IOError, 'HS2PS Post-Processor', self.name, 'accepts only one dataObject')
+    if inputs[0].type != 'HistorySet':
+      self.raiseAnError(IOError, 'HS2PS Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type))

   def _handleInput(self, paramInput):
     """
@@ -75,80 +84,32 @@ def _handleInput(self, paramInput):
       Function to handle the parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
@ Out, None """ + super()._handleInput(paramInput) for child in paramInput.subparts: if child.getName() == 'pivotParameter': self.pivotParameter = child.value elif child.getName() == 'features': - self.features = child.value - elif child.getName() !='method': - self.raiseAnError(IOError, 'HS2PS Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized') - + self.features = 'all' if 'all' in child.value else child.value if self.pivotParameter == None: - self.raiseAnError(IOError, 'HS2PS Interfaced Post-Processor ' + str(self.name) + ' : pivotParameter is not specified') + self.raiseAnError(IOError, 'HS2PS Post-Processor', self.name, ': pivotParameter is not specified') - def run(self,inputDic): + def run(self,inputIn): """ This method performs the actual transformation of the data object from history set to point set - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects - @ Out, outputDic, dict, output dictionary + @ In, inputIn, list, list of datasets which contains the data inside the input DataObjects + @ Out, output, xarray.Dataset, output dataset """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'HS2PS Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - else: - inputDict = inputDic[0] - outputDic = {'data': {}} - outputDic['dims'] = {} - numSamples = inputDict['numberRealizations'] - - # generate the input part of the output dictionary - for inputVar in inputDict['inpVars']: - outputDic['data'][inputVar] = inputDict['data'][inputVar] - - # generate the output part of the output dictionary - if self.features == 'all': - self.features = inputDict['outVars'] - - historyLength = len(inputDict['data'][self.features[0]][0]) - numVariables = historyLength*len(self.features) - for history in inputDict['data'][self.features[0]]: - if len(history) != historyLength: - self.raiseAnError(IOError, 'HS2PS Interfaced Post-Processor ' + str(self.name) + ' : one or more histories in the historySet have different time scale') - - tempDict = {} - matrix = np.zeros((numSamples,numVariables)) - for i in range(numSamples): - temp = np.empty(0) - for feature in self.features: - temp=np.append(temp,inputDict['data'][feature][i]) - matrix[i,:]=temp - - for key in range(numVariables): - outputDic['data'][str(key)] = np.empty(0) - outputDic['data'][str(key)] = matrix[:,key] - outputDic['dims'][str(key)] = [] - # add meta variables back - for key in inputDict['metaKeys']: - outputDic['data'][key] = inputDict['data'][key] - - self.transformationSettings['vars'] = copy.deepcopy(self.features) - self.transformationSettings['timeLength'] = historyLength - self.transformationSettings['timeAxis'] = inputDict['data'][self.pivotParameter][0] - self.transformationSettings['dimID'] = outputDic['data'].keys() - - return outputDic - - def _inverse(self,inputDic): - """ - This method is aimed to return the inverse of the action of this PostProcessor - @ In, inputDic, dict, dictionary which contains the transformed data of this PP - @ Out, data, dict, the dictionary containing the inverse of the data (the orginal space) - """ - data = {} - for hist in inputDic.keys(): - data[hist]= {} - tempData = inputDic[hist].reshape((len(self.transformationSettings['vars']),self.transformationSettings['timeLength'])) - for index,var in enumerate(self.transformationSettings['vars']): - data[hist][var] = tempData[index,:] - data[hist][self.pivotParameter] = self.transformationSettings['timeAxis'] - - return data + inpVars, 
outVars, data = inputIn['Data'][0] + if self.features == 'all': + self.features = outVars + outDataset = data.drop_dims(self.pivotParameter) + featDataset = data[self.features] + if featDataset[self.features[-1]].isnull().sum() > 0: + self.raiseAnError(IOError, 'Found misalignment in provided DataObject!') + numRlz = data.dims['RAVEN_sample_ID'] + featData = featDataset.to_array().values.transpose(1, 0, 2).reshape(numRlz, -1) + varNames = [str(i) for i in range(featData.shape[-1])] + convertedFeat = xr.DataArray(featData, dims=('RAVEN_sample_ID', 'outVars'), coords={'RAVEN_sample_ID':data['RAVEN_sample_ID'], 'outVars':varNames}) + convertedFeatDataset = convertedFeat.to_dataset(dim='outVars') + outDataset = xr.merge([outDataset, convertedFeatDataset]) + return outDataset diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_HS2PS.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_HS2PS.xml index 782f3b125c..aec034fada 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_HS2PS.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_HS2PS.xml @@ -9,7 +9,7 @@ Tests of the HS2PS interfaced post-processor - + HS2PS FirstMRun,PP @@ -40,8 +40,7 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HS2PS + time From 9dfccd2a660b6de21d87353b62fb7d9be58f03ab Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Tue, 27 Apr 2021 22:12:41 -0600 Subject: [PATCH 28/51] update --- framework/PostProcessorFunctions/HS2PS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/PostProcessorFunctions/HS2PS.py b/framework/PostProcessorFunctions/HS2PS.py index c654208ba1..1c0337f9da 100644 --- a/framework/PostProcessorFunctions/HS2PS.py +++ b/framework/PostProcessorFunctions/HS2PS.py @@ -97,7 +97,7 @@ def run(self,inputIn): """ This method performs the actual transformation of the data object from history set to point set @ In, inputIn, list, list of datasets which contains the data inside the input DataObjects - @ Out, output, xarray.Dataset, output dataset + @ Out, outDataset, xarray.Dataset, output dataset """ inpVars, outVars, data = inputIn['Data'][0] if self.features == 'all': From db587a1ed79c20e61af0f2e1ecada2cf28031969 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Wed, 28 Apr 2021 22:26:46 -0600 Subject: [PATCH 29/51] add metakeys from input dataobjects to output dataobjects --- framework/PluginsBaseClasses/PostProcessorPluginBase.py | 4 ++++ tests/framework/PostProcessors/InterfacedPostProcessor/tests | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index 393af5ad6c..91c3d1bb9e 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -86,6 +86,10 @@ def initialize(self, runInfo, inputs, initDict=None): @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model """ super().initialize(runInfo, inputs, initDict) + ## add meta keys from input data objects + for inputObj in inputs: + metaKeys = inputObj.getVars('meta') + self.addMetaKeys(metaKeys) def _handleInput(self, paramInput): """ diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/tests b/tests/framework/PostProcessors/InterfacedPostProcessor/tests index 55f558db19..ee007314db 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/tests +++ 
b/tests/framework/PostProcessors/InterfacedPostProcessor/tests @@ -63,7 +63,6 @@ type = 'RavenFramework' input = 'test_HS2PS.xml' csv = 'HS2PS/PrintPPPointSet_dump.csv' - output = 'HS2PS/PrintPPPointSet_dump.xml' [../] [./interfacedPostProcessor_PointSet] type = 'RavenFramework' From 31ce4f66b7578ac0ae3b02e659e5a53f885680ed Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 29 Apr 2021 10:23:08 -0600 Subject: [PATCH 30/51] update --- .../Models/PostProcessors/DataClassifier.py | 3 ++- .../PostProcessors/RiskMeasuresDiscrete.py | 10 ++++++---- .../PostProcessorPluginBase.py | 19 +++++++++++++++---- framework/PostProcessorFunctions/HS2PS.py | 1 + 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/framework/Models/PostProcessors/DataClassifier.py b/framework/Models/PostProcessors/DataClassifier.py index 5fe09d6280..dae2aaa6e9 100644 --- a/framework/Models/PostProcessors/DataClassifier.py +++ b/framework/Models/PostProcessors/DataClassifier.py @@ -119,7 +119,8 @@ def identifyInputs(self, inputData): haveClassifier = False haveTarget = False requiredKeys = list(self.mapping.keys()) + [self.label] - for inputDict in currentInput: + for inputTuple in currentInput: + _, _, inputDict = inputTuple if inputDict['type'] not in ['PointSet', 'HistorySet']: self.raiseAnError(IOError, "The input for this postprocesor", self.name, "is not acceptable! Allowed inputs are 'PointSet' and 'HistorySet'.") dataType = None diff --git a/framework/Models/PostProcessors/RiskMeasuresDiscrete.py b/framework/Models/PostProcessors/RiskMeasuresDiscrete.py index fff12f2d75..9bc874e8d1 100644 --- a/framework/Models/PostProcessors/RiskMeasuresDiscrete.py +++ b/framework/Models/PostProcessors/RiskMeasuresDiscrete.py @@ -171,8 +171,9 @@ def run(self,inputIn): inputDic = inputIn['Data'] checkHSs=0 for inp in inputDic: - if inp['type'] == 'HistorySet': - timeDepData = copy.deepcopy(inp) + _, _, inpDs = inp + if inpDs['type'] == 'HistorySet': + timeDepData = copy.deepcopy(inpDs) inputDic.remove(inp) checkHSs +=1 @@ -207,7 +208,7 @@ def run(self,inputIn): # replicate metadata # add meta variables back - for key in inputDic[-1]['metaKeys']: + for key in inputDic[-1][-1]['metaKeys']: outputDic['data'][key] = np.asanyarray(1.0) return outputDic @@ -234,7 +235,8 @@ def runStatic(self,inputDic, componentConfig=None): r1Low = self.variables[variable]['R1low'] r1High = self.variables[variable]['R1high'] - for inp in inputDic: + for inpInfo in inputDic: + _, _, inp = inpInfo ## Get everything out of the inputDic at the outset, the hope is to have no string literals on the interior ## of this function. 
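    # NOTE (plugin-base refactor): each entry of inputDic['Data'] now arrives as a
    # (inputVars, outputVars, dataDict) tuple built by createPostProcessorInput,
    # so only the third element (the dict payload) is unpacked and used below.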
if componentConfig is None: diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index 91c3d1bb9e..651f221385 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -58,6 +58,15 @@ def __init__(self): super().__init__() self._inputDataType = 'dict' # Current accept two types: 1) 'dict', 2) 'xrDataset' # Set default to 'dict', this is consistent with current post-processors + self._keepInputMeta = False # Meta keys from input data objects will be added to output data objects + + def keepInputMeta(self, keep=False): + """ + Method to set the status of "self._keepInputMeta" + @ In, keep, bool, If True, the meta keys from input data objects will be added to output data objects + @ Out, None + """ + self._keepInputMeta = keep def setInputDataType(self, dataType='dict'): """ @@ -86,10 +95,12 @@ def initialize(self, runInfo, inputs, initDict=None): @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model """ super().initialize(runInfo, inputs, initDict) - ## add meta keys from input data objects - for inputObj in inputs: - metaKeys = inputObj.getVars('meta') - self.addMetaKeys(metaKeys) + if self._keepInputMeta: + ## add meta keys from input data objects + for inputObj in inputs: + if isinstance(inputObj, DataObject.DataObject): + metaKeys = inputObj.getVars('meta') + self.addMetaKeys(metaKeys) def _handleInput(self, paramInput): """ diff --git a/framework/PostProcessorFunctions/HS2PS.py b/framework/PostProcessorFunctions/HS2PS.py index 1c0337f9da..59115823ff 100644 --- a/framework/PostProcessorFunctions/HS2PS.py +++ b/framework/PostProcessorFunctions/HS2PS.py @@ -63,6 +63,7 @@ def __init__(self): self.pivotParameter = None self.features = 'all' self.setInputDataType('xrDataset') + self.keepInputMeta(True) def initialize(self, runInfo, inputs, initDict=None): """ From 2c9522a6276936422266619da571cac67b9b184e Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 29 Apr 2021 10:58:03 -0600 Subject: [PATCH 31/51] move HS2PS to PostProcessors --- framework/Models/PostProcessors/Factory.py | 1 + .../{PostProcessorFunctions => Models/PostProcessors}/HS2PS.py | 0 2 files changed, 1 insertion(+) rename framework/{PostProcessorFunctions => Models/PostProcessors}/HS2PS.py (100%) diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py index 8a9945d88f..35cac83e8b 100644 --- a/framework/Models/PostProcessors/Factory.py +++ b/framework/Models/PostProcessors/Factory.py @@ -42,6 +42,7 @@ from .RiskMeasuresDiscrete import RiskMeasuresDiscrete from .Validation import Validation from .HistorySetDelay import HistorySetDelay +from .HS2PS import HS2PS ## These utilize the optional prequisite library PySide, so don't error if they ## do not import appropriately. 
try: diff --git a/framework/PostProcessorFunctions/HS2PS.py b/framework/Models/PostProcessors/HS2PS.py similarity index 100% rename from framework/PostProcessorFunctions/HS2PS.py rename to framework/Models/PostProcessors/HS2PS.py From d2a19caaaede94de46272a6f21ec37fae008ad93 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 13:59:34 -0600 Subject: [PATCH 32/51] convert HStoPSOperator --- framework/Models/PostProcessors/HS2PS.py | 28 ++- .../PostProcessorPluginBase.py | 16 +- .../PostProcessorFunctions/HStoPSOperator.py | 204 ++++++++++-------- .../test_HistorySetToPointSetOperators.xml | 35 ++- 4 files changed, 163 insertions(+), 120 deletions(-) diff --git a/framework/Models/PostProcessors/HS2PS.py b/framework/Models/PostProcessors/HS2PS.py index 59115823ff..1d6f8a9618 100644 --- a/framework/Models/PostProcessors/HS2PS.py +++ b/framework/Models/PostProcessors/HS2PS.py @@ -75,9 +75,9 @@ def initialize(self, runInfo, inputs, initDict=None): """ super().initialize(runInfo, inputs, initDict) if len(inputs)>1: - self.raiseAnError(IOError, 'HS2PS Post-Processor', self.name, 'accepts only one dataObject') + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') if inputs[0].type != 'HistorySet': - self.raiseAnError(IOError, 'HS2PS Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) def _handleInput(self, paramInput): """ @@ -97,7 +97,9 @@ def _handleInput(self, paramInput): def run(self,inputIn): """ This method performs the actual transformation of the data object from history set to point set - @ In, inputIn, list, list of datasets which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionary of data. 
+ inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, xr.Dataset) @ Out, outDataset, xarray.Dataset, output dataset """ inpVars, outVars, data = inputIn['Data'][0] @@ -113,4 +115,24 @@ def run(self,inputIn): convertedFeat = xr.DataArray(featData, dims=('RAVEN_sample_ID', 'outVars'), coords={'RAVEN_sample_ID':data['RAVEN_sample_ID'], 'outVars':varNames}) convertedFeatDataset = convertedFeat.to_dataset(dim='outVars') outDataset = xr.merge([outDataset, convertedFeatDataset]) + ## self.transformationSettings is used by _inverse method when doing DataMining + self.transformationSettings['vars'] = copy.deepcopy(self.features) + self.transformationSettings['timeLength'] = data[self.pivotParameter].size + self.transformationSettings['timeAxis'] = data[self.pivotParameter][0] + self.transformationSettings['dimID'] = list(outDataset.keys()) return outDataset + + def _inverse(self,inputDic): + """ + This method is aimed to return the inverse of the action of this PostProcessor + @ In, inputDic, dict, dictionary which contains the transformed data of this PP + @ Out, data, dict, the dictionary containing the inverse of the data (the orginal space) + """ + data = {} + for hist in inputDic.keys(): + data[hist]= {} + tempData = inputDic[hist].reshape((len(self.transformationSettings['vars']),self.transformationSettings['timeLength'])) + for index,var in enumerate(self.transformationSettings['vars']): + data[hist][var] = tempData[index,:] + data[hist][self.pivotParameter] = self.transformationSettings['timeAxis'] + return data diff --git a/framework/PluginsBaseClasses/PostProcessorPluginBase.py b/framework/PluginsBaseClasses/PostProcessorPluginBase.py index 651f221385..c0deea2ee0 100644 --- a/framework/PluginsBaseClasses/PostProcessorPluginBase.py +++ b/framework/PluginsBaseClasses/PostProcessorPluginBase.py @@ -131,7 +131,21 @@ def createPostProcessorInput(self, inputObjs, **kwargs): @ In, **kwargs, dict, is a dictionary that contains the information passed by "Step". Currently not used by PostProcessor. It can be useful by Step to control the input and output of the PostProcessor, as well as other control options for the PostProcessor - @ Out, inputDict, list, list of data set that will be directly used by the "PostProcessor.run" method. + @ Out, inputDict, dict, dictionary of data that will be directly used by the "PostProcessor.run" method. 
+ inputDict = {'Data':listData, 'Files':listOfFiles}, + listData has the following format if 'xrDataset' is passed to self.setInputDataType('xrDataset') + (listOfInputVars, listOfOutVars, xr.Dataset) + Otherwise listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with + DataDict is a dictionary that has the format + dataDict['dims'] = dict {varName:independentDimensions} + dataDict['metadata'] = dict {metaVarName:metaVarValue} + dataDict['type'] = str TypeOfDataObject + dataDict['inpVars'] = list of input variables + dataDict['outVars'] = list of output variables + dataDict['numberRealization'] = int SizeOfDataObject + dataDict['name'] = str DataObjectName + dataDict['metaKeys'] = list of meta variables + dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)} """ #### TODO: This method probably need to move to PostProcessor Base Class when we have converted #### all internal PostProcessors to use Dataset diff --git a/framework/PostProcessorFunctions/HStoPSOperator.py b/framework/PostProcessorFunctions/HStoPSOperator.py index 7759adb4ae..759f7b3518 100644 --- a/framework/PostProcessorFunctions/HStoPSOperator.py +++ b/framework/PostProcessorFunctions/HStoPSOperator.py @@ -16,21 +16,19 @@ @author: alfoa """ -#for future compatibility with Python 3-------------------------------------------------------------- -from __future__ import division, print_function, unicode_literals, absolute_import -#End compatibility block for Python 3---------------------------------------------------------------- - #External Modules------------------------------------------------------------------------------------ import os import copy import numpy as np #External Modules End-------------------------------------------------------------------------------- -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP +#Internal Modules--------------------------------------------------------------- +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes +#Internal Modules End----------------------------------------------------------- -class HStoPSOperator(PostProcessorInterfaceBase): +class HStoPSOperator(PostProcessorPluginBase): """ This Post-Processor performs the conversion from HistorySet to PointSet The conversion is performed based on any of the following operations: @@ -48,31 +46,41 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("HStoPSOperator")) inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType)) inputSpecification.addSub(InputData.parameterInputFactory("row", contentType=InputTypes.FloatType)) inputSpecification.addSub(InputData.parameterInputFactory("pivotValue", contentType=InputTypes.FloatType)) inputSpecification.addSub(InputData.parameterInputFactory("operator", contentType=InputTypes.StringType)) PivotStategyType = InputTypes.makeEnumType("PivotStategy", "PivotStategyType", ['nearest','floor','ceiling','interpolate']) inputSpecification.addSub(InputData.parameterInputFactory("pivotStrategy", contentType=PivotStategyType)) - #Should method be in super class? 
-    inputSpecification.addSub(InputData.parameterInputFactory("method", contentType=InputTypes.StringType))
-
     return inputSpecification

-  def initialize(self):
+  def __init__(self):
     """
-      Method to initialize the Interfaced Post-processor
-      @ In, None
-      @ Out, None
+      Constructor
+      @ In, None
+      @ Out, None
     """
-    PostProcessorInterfaceBase.initialize(self)
-    self.inputFormat = 'HistorySet'
+    super().__init__()
     self.outputFormat = 'PointSet'
-    #pivotParameter identify the ID of the temporal variable in the data set based on which
-    # the operations are performed. Optional (defaul=time)
-    self.pivotParameter = 'time'
+    self.pivotParameter = 'time' #pivotParameter identifies the ID of the temporal variable
     self.settings = {'operationType':None,'operationValue':None,'pivotStrategy':'nearest'}
+    self.setInputDataType('dict')
+    self.keepInputMeta(True)
+    self.outputMultipleRealizations = True # True indicates multiple realizations are returned
+
+  def initialize(self, runInfo, inputs, initDict=None):
+    """
+      Method to initialize the HStoPSOperator post-processor.
+      @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
+      @ In, inputs, list, list of inputs
+      @ In, initDict, dict, optional, dictionary with initialization options
+      @ Out, None
+    """
+    super().initialize(runInfo, inputs, initDict)
+    if len(inputs)>1:
+      self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject')
+    if inputs[0].type != 'HistorySet':
+      self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type))

   def _handleInput(self, paramInput):
     """
@@ -80,7 +88,6 @@ def _handleInput(self, paramInput):
       @ In, paramInput, ParameterInput, the already parsed input.
       @ Out, None
     """
-
     foundPivot = False
     for child in paramInput.subparts:
       if child.getName() == 'pivotParameter':
@@ -90,7 +97,7 @@ def _handleInput(self, paramInput):
         self.settings['operationValue'] = child.value
       elif child.getName() == 'pivotStrategy':
         self.settings[child.getName()] = child.value.strip()
-      elif child.getName() !='method':
+      else:
         self.raiseAnError(IOError, 'XML node ' + str(child.tag) + ' is not recognized')
     if not foundPivot:
       self.raiseAWarning('"pivotParameter" is not inputted!
Default is "'+ self.pivotParameter +'"!') @@ -99,82 +106,91 @@ def _handleInput(self, paramInput): if self.settings['operationType'] == 'operator' and self.settings['operationValue'] not in ['max','min','average','all']: self.raiseAnError(IOError, '"operator" can be either "max", "min", "average" or "all"!') - def run(self,inputDic): + def run(self,inputIn): """ This method performs the actual transformation of the data object from history set to point set - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionaries which contains the data inside the input DataObjects + inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with + DataDict is a dictionary that has the format + dataDict['dims'] = dict {varName:independentDimensions} + dataDict['metadata'] = dict {metaVarName:metaVarValue} + dataDict['type'] = str TypeOfDataObject + dataDict['inpVars'] = list of input variables + dataDict['outVars'] = list of output variables + dataDict['numberRealization'] = int SizeOfDataObject + dataDict['name'] = str DataObjectName + dataDict['metaKeys'] = list of meta variables + dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)} @ Out, outputDic, dict, output dictionary """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'Only one DataObject is accepted!') - else: - inputDict = inputDic[0] - outputDic = {'data': {}} - outputDic['dims'] = {} - numSamples = inputDict['numberRealizations'] - - # generate the input part and metadata of the output dictionary - outputDic['data'].update(inputDict['data']) - - # generate the output part of the output dictionary + _, _, inputDict = inputIn['Data'][0] + outputDic = {'data': {}} + outputDic['dims'] = {} + numSamples = inputDict['numberRealization'] + + # generate the input part and metadata of the output dictionary + outputDic['data'].update(inputDict['data']) + + # generate the output part of the output dictionary + for outputVar in inputDict['outVars']: + outputDic['data'][outputVar] = np.empty(0) + + # check if pivot value is present + if self.settings['operationType'] == 'pivotValue': + if self.pivotParameter not in inputDict['data']: + self.raiseAnError(RuntimeError,'Pivot Variable "'+str(self.pivotParameter)+'" not found in data !') + + if self.settings['operationValue'] == 'all': + #First of all make a new input variable of the time samples + origPivot = inputDict['data'][self.pivotParameter] + newPivot = np.concatenate(origPivot) + outputDic['data'][self.pivotParameter] = newPivot + #next, expand each of the input and meta parameters by duplicating them + for inVar in inputDict['inpVars']+inputDict['metaKeys']: + origSamples = outputDic['data'][inVar] + outputDic['data'][inVar] = np.empty(0) + for hist in range(numSamples): + #for each sample, need to expand since same in each time sample + outputDic['data'][inVar] = np.append(outputDic['data'][inVar], + np.full(origPivot[hist].shape, + origSamples[hist])) + + + for hist in range(numSamples): for outputVar in inputDict['outVars']: - outputDic['data'][outputVar] = np.empty(0) - - # check if pivot value is present - if self.settings['operationType'] == 'pivotValue': - if self.pivotParameter not in inputDict['data']: - self.raiseAnError(RuntimeError,'Pivot Variable "'+str(self.pivotParameter)+'" not found in data !') - - if self.settings['operationValue'] == 'all': - #First of all make a new input variable of the time samples - origPivot = 
inputDict['data'][self.pivotParameter] - newPivot = np.concatenate(origPivot) - outputDic['data'][self.pivotParameter] = newPivot - #next, expand each of the input and meta parameters by duplicating them - for inVar in inputDict['inpVars']+inputDict['metaKeys']: - origSamples = outputDic['data'][inVar] - outputDic['data'][inVar] = np.empty(0) - for hist in range(numSamples): - #for each sample, need to expand since same in each time sample - outputDic['data'][inVar] = np.append(outputDic['data'][inVar], - np.full(origPivot[hist].shape, - origSamples[hist])) - - - for hist in range(numSamples): - for outputVar in inputDict['outVars']: - if self.settings['operationType'] == 'row': - if int(self.settings['operationValue']) >= len(inputDict['data'][outputVar][hist]): - self.raiseAnError(RuntimeError,'row value > of size of history "'+str(hist)+'" !') - outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(inputDict['data'][outputVar][hist][int(self.settings['operationValue'])])) - elif self.settings['operationType'] == 'pivotValue': - if self.settings['pivotStrategy'] in ['nearest','floor','ceiling']: - idx = (np.abs(np.asarray(outputDic['data'][self.pivotParameter][hist])-float(self.settings['operationValue']))).argmin() - if self.settings['pivotStrategy'] == 'floor': - if np.asarray(outputDic['data'][self.pivotParameter][hist])[idx] > self.settings['operationValue']: - idx-=1 - if self.settings['pivotStrategy'] == 'ceiling': - if np.asarray(outputDic['data'][self.pivotParameter][hist])[idx] < self.settings['operationValue']: - idx+=1 - outputDic['data'][self.pivotParameter][hist] - if idx > len(inputDict['data'][outputVar][hist]): - idx = len(inputDict['data'][outputVar][hist])-1 - elif idx < 0: - idx = 0 - outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(inputDict['data'][outputVar][hist][idx])) - else: - # interpolate - interpValue = np.interp(self.settings['operationValue'], np.asarray(inputDict['data'][self.pivotParameter][hist]), np.asarray(inputDict['data'][outputVar][hist])) - outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], interpValue) + if self.settings['operationType'] == 'row': + if int(self.settings['operationValue']) >= len(inputDict['data'][outputVar][hist]): + self.raiseAnError(RuntimeError,'row value > of size of history "'+str(hist)+'" !') + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(inputDict['data'][outputVar][hist][int(self.settings['operationValue'])])) + elif self.settings['operationType'] == 'pivotValue': + if self.settings['pivotStrategy'] in ['nearest','floor','ceiling']: + idx = (np.abs(np.asarray(outputDic['data'][self.pivotParameter][hist])-float(self.settings['operationValue']))).argmin() + if self.settings['pivotStrategy'] == 'floor': + if np.asarray(outputDic['data'][self.pivotParameter][hist])[idx] > self.settings['operationValue']: + idx-=1 + if self.settings['pivotStrategy'] == 'ceiling': + if np.asarray(outputDic['data'][self.pivotParameter][hist])[idx] < self.settings['operationValue']: + idx+=1 + outputDic['data'][self.pivotParameter][hist] + if idx > len(inputDict['data'][outputVar][hist]): + idx = len(inputDict['data'][outputVar][hist])-1 + elif idx < 0: + idx = 0 + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(inputDict['data'][outputVar][hist][idx])) else: - # operator - if self.settings['operationValue'] == 'max': - outputDic['data'][outputVar] = 
np.append(outputDic['data'][outputVar], copy.deepcopy(np.max(inputDict['data'][outputVar][hist]))) - elif self.settings['operationValue'] == 'min': - outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(np.min(inputDict['data'][outputVar][hist]))) - elif self.settings['operationValue'] == 'average': - outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(np.mean(inputDict['data'][outputVar][hist]))) - elif self.settings['operationValue'] == 'all': - outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(inputDict['data'][outputVar][hist])) - - return outputDic + # interpolate + interpValue = np.interp(self.settings['operationValue'], np.asarray(inputDict['data'][self.pivotParameter][hist]), np.asarray(inputDict['data'][outputVar][hist])) + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], interpValue) + else: + # operator + if self.settings['operationValue'] == 'max': + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(np.max(inputDict['data'][outputVar][hist]))) + elif self.settings['operationValue'] == 'min': + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(np.min(inputDict['data'][outputVar][hist]))) + elif self.settings['operationValue'] == 'average': + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(np.mean(inputDict['data'][outputVar][hist]))) + elif self.settings['operationValue'] == 'all': + outputDic['data'][outputVar] = np.append(outputDic['data'][outputVar], copy.deepcopy(inputDict['data'][outputVar][hist])) + + return outputDic diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_HistorySetToPointSetOperators.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_HistorySetToPointSetOperators.xml index e4ea7b0431..c7eb69f76a 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_HistorySetToPointSetOperators.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_HistorySetToPointSetOperators.xml @@ -11,7 +11,7 @@ 1) row-based projection, 2) pivot-value-based projection and 3) operator-based projection (max, min, average) - + HStoPSoperators @@ -33,50 +33,41 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HStoPSOperator + -1 - - HStoPSOperator + time 0.3 - - HStoPSOperator + time 0.3 interpolate - - HStoPSOperator + time 0.3 floor - - HStoPSOperator + time 0.3 ceiling - - HStoPSOperator + time max - - HStoPSOperator + time min - - HStoPSOperator + time average - - HStoPSOperator + time all @@ -142,7 +133,7 @@ x0,y0,z0 OutputPlaceHolder - + x0,y0,z0 x,y,z @@ -179,13 +170,13 @@ x0,y0,z0,time x,y,z - + x0,y0,z0 time,x,y,z - + From abc7e49b41df7e48889edebd9b8a5857737b9fae Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 14:50:49 -0600 Subject: [PATCH 33/51] convert histset sampling and historyset sync pp --- framework/DataObjects/DataSet.py | 2 +- .../PostProcessorFunctions/HStoPSOperator.py | 4 +- .../HistorySetSampling.py | 73 ++++---- .../PostProcessorFunctions/HistorySetSync.py | 170 ++++++++++-------- .../test_historySetSamplingIPP.xml | 17 +- .../test_historySetSamplingIntervalAve.xml | 17 +- .../test_historySetSampling_and_sync.xml | 8 +- .../test_historySetSync.xml | 8 +- .../test_historySetSyncAll.xml | 3 +- .../test_historySetSyncMax.xml | 3 +- .../test_historySetSyncMin.xml | 3 +- 11 files changed, 160 insertions(+), 148 deletions(-) diff --git 
a/framework/DataObjects/DataSet.py b/framework/DataObjects/DataSet.py index 49fa2506d1..aafca3f91b 100644 --- a/framework/DataObjects/DataSet.py +++ b/framework/DataObjects/DataSet.py @@ -1229,7 +1229,7 @@ def _convertToDict(self): dataDict['type'] = self.type dataDict['inpVars'] = self.getVars('input') dataDict['outVars'] = self.getVars('output') - dataDict['numberRealization'] = self.size + dataDict['numberRealizations'] = self.size dataDict['name'] = self.name dataDict['metaKeys'] = self.getVars('meta') # main data diff --git a/framework/PostProcessorFunctions/HStoPSOperator.py b/framework/PostProcessorFunctions/HStoPSOperator.py index 759f7b3518..1cb29e3db4 100644 --- a/framework/PostProcessorFunctions/HStoPSOperator.py +++ b/framework/PostProcessorFunctions/HStoPSOperator.py @@ -61,12 +61,12 @@ def __init__(self): @ Out, None """ super().__init__() - self.outputFormat = 'PointSet' self.pivotParameter = 'time' #pivotParameter identify the ID of the temporal variabl self.settings = {'operationType':None,'operationValue':None,'pivotStrategy':'nearest'} self.setInputDataType('dict') self.keepInputMeta(True) self.outputMultipleRealizations = True # True indicate multiple realizations are returned + self.validDataType = ['PointSet'] # The list of accepted types of DataObject def initialize(self, runInfo, inputs, initDict=None): """ @@ -127,7 +127,7 @@ def run(self,inputIn): _, _, inputDict = inputIn['Data'][0] outputDic = {'data': {}} outputDic['dims'] = {} - numSamples = inputDict['numberRealization'] + numSamples = inputDict['numberRealizations'] # generate the input part and metadata of the output dictionary outputDic['data'].update(inputDict['data']) diff --git a/framework/PostProcessorFunctions/HistorySetSampling.py b/framework/PostProcessorFunctions/HistorySetSampling.py index f2cf7e6cae..50e3c54918 100644 --- a/framework/PostProcessorFunctions/HistorySetSampling.py +++ b/framework/PostProcessorFunctions/HistorySetSampling.py @@ -16,20 +16,16 @@ @author: mandd """ - -from __future__ import division, print_function, unicode_literals, absolute_import -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP - - import os import numpy as np from scipy import interpolate from scipy import integrate import copy +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes -class HistorySetSampling(PostProcessorInterfaceBase): +class HistorySetSampling(PostProcessorPluginBase): """ This Post-Processor performs the conversion from HistorySet to HistorySet The conversion is made so that each history H is re-sampled accordingly to a specific sampling strategy. @@ -45,7 +41,6 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("HistorySetSampling")) HSSamplingType = InputTypes.makeEnumType("HSSampling", "HSSamplingType", ['uniform','firstDerivative','secondDerivative','filteredFirstDerivative','filteredSecondDerivative']) inputSpecification.addSub(InputData.parameterInputFactory("samplingType", contentType=HSSamplingType)) inputSpecification.addSub(InputData.parameterInputFactory("numberOfSamples", contentType=InputTypes.IntegerType)) @@ -53,35 +48,45 @@ class cls. 
inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType)) HSInterpolationType = InputTypes.makeEnumType("HSInterpolation", "HSInterpolationType", ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'intervalAverage']) inputSpecification.addSub(InputData.parameterInputFactory("interpolation", contentType=HSInterpolationType)) - #Should method be in super class? - inputSpecification.addSub(InputData.parameterInputFactory("method", contentType=InputTypes.StringType)) return inputSpecification - def initialize(self): + def __init__(self): """ - Method to initialize the Interfaced Post-processor - @ In, None, - @ Out, None, - + Constructor + @ In, None + @ Out, None """ - - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'HistorySet' - self.outputFormat = 'HistorySet' - + super().__init__() + self.pivotParameter = None #pivotParameter identifies the ID of the temporal variable + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicates multiple realizations are returned + self.validDataType = ['HistorySet'] # The list of accepted types of DataObject self.samplingType = None self.numberOfSamples = None self.tolerance = None - self.pivotParameter = None self.interpolation = None + def initialize(self, runInfo, inputs, initDict=None): + """ + Method to initialize the HistorySetSampling post-processor. + @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None + """ + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'HistorySet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) def _handleInput(self, paramInput): """ Function to handle the parameter input. @ In, paramInput, ParameterInput, the already parsed input.
@ Out, None """ - for child in paramInput.subparts: if child.getName() == 'samplingType': self.samplingType = child.value @@ -93,7 +98,7 @@ def _handleInput(self, paramInput): self.pivotParameter = child.value elif child.getName() == 'interpolation': self.interpolation = child.value - elif child.getName() !='method': + else: self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized') if self.pivotParameter is None: @@ -106,19 +111,25 @@ def _handleInput(self, paramInput): if self.tolerance is None or self.tolerance < 0.0: self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : tolerance is not specified or less than 0') - - def run(self,inputDic): + def run(self,inputIn): """ Method to post-process the dataObjects - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionaries which contains the data inside the input DataObjects + inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with + DataDict is a dictionary that has the format + dataDict['dims'] = dict {varName:independentDimensions} + dataDict['metadata'] = dict {metaVarName:metaVarValue} + dataDict['type'] = str TypeOfDataObject + dataDict['inpVars'] = list of input variables + dataDict['outVars'] = list of output variables + dataDict['numberRealization'] = int SizeOfDataObject + dataDict['name'] = str DataObjectName + dataDict['metaKeys'] = list of meta variables + dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)} @ Out, outputDic, dict, dictionary of resampled histories """ - # check that we only have one data object - if len(inputDic)>1: - self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - - # grab the first (and only) data object - inputDic = inputDic[0] + _, _, inputDic = inputIn['Data'][0] outputDic={'data':{}} # load up the input data into the output for var in inputDic['inpVars']: diff --git a/framework/PostProcessorFunctions/HistorySetSync.py b/framework/PostProcessorFunctions/HistorySetSync.py index df60f6c335..c4c50c8d00 100644 --- a/framework/PostProcessorFunctions/HistorySetSync.py +++ b/framework/PostProcessorFunctions/HistorySetSync.py @@ -15,10 +15,6 @@ Created on October 28, 2015 """ -#for future compatibility with Python 3-------------------------------------------------------------- -from __future__ import division, print_function, unicode_literals, absolute_import -#End compatibility block for Python 3---------------------------------------------------------------- - #External Modules------------------------------------------------------------------------------------ import os import copy @@ -26,11 +22,11 @@ import numpy as np #External Modules End-------------------------------------------------------------------------------- -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes -class HistorySetSync(PostProcessorInterfaceBase): +class HistorySetSync(PostProcessorPluginBase): """ This Post-Processor performs the conversion from HistorySet to HistorySet The conversion is made so that all histories are syncronized in time. @@ -46,33 +42,42 @@ class cls. specifying input of cls. 
""" inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("HistorySetSync")) inputSpecification.addSub(InputData.parameterInputFactory("numberOfSamples", contentType=InputTypes.IntegerType)) HSSSyncType = InputTypes.makeEnumType("HSSSync", "HSSSyncType", ['all','grid','max','min']) inputSpecification.addSub(InputData.parameterInputFactory("syncMethod", contentType=HSSSyncType)) inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType)) inputSpecification.addSub(InputData.parameterInputFactory("extension", contentType=InputTypes.StringType)) - #Should method be in super class? - inputSpecification.addSub(InputData.parameterInputFactory("method", contentType=InputTypes.StringType)) return inputSpecification - def initialize(self, numberOfSamples=None, pivotParameter=None, extension=None, syncMethod=None): + def __init__(self): """ - Method to initialize the Interfaced Post-processor - @ In, numberOfSamples, int, (default None) - @ In, pivotParameter, str, ID of the pivot paramter (e.g., time) - @ In, extension, type of extension to be employed - @ In, syncMethod, type of syncrhonization method - @ Out, None, + Constructor + @ In, None + @ Out, None """ - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'HistorySet' - self.outputFormat = 'HistorySet' - - self.numberOfSamples = numberOfSamples - self.pivotParameter = pivotParameter - self.extension = extension - self.syncMethod = syncMethod + super().__init__() + self.pivotParameter = 'time' #pivotParameter identify the ID of the temporal variabl + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicate multiple realizations are returned + self.validDataType = ['HistorySet'] # The list of accepted types of DataObject + self.numberOfSamples = None + self.extension = None + self.syncMethod = None + + def initialize(self, runInfo, inputs, initDict=None): + """ + Method to initialize the DataClassifier post-processor. + @ In, runInfo, dict, dictionary of run info (e.g. 
working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None + """ + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'HistorySet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) def _handleInput(self, paramInput): """ @@ -90,7 +95,7 @@ def _handleInput(self, paramInput): self.pivotParameter = child.value elif child.getName() == 'extension': self.extension = child.value - elif child.getName() !='method': + else: self.raiseAnError(IOError, 'HistorySetSync Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized') if self.syncMethod == 'grid' and not isinstance(self.numberOfSamples, int): @@ -100,66 +105,75 @@ def _handleInput(self, paramInput): if self.extension is None or not (self.extension == 'zeroed' or self.extension == 'extended'): self.raiseAnError(IOError, 'HistorySetSync Interfaced Post-Processor ' + str(self.name) + ' : extension type is not correctly specified (either not specified or not one of its possible allowed values: zeroed or extended)') - def run(self,inputDic): + def run(self,inputIn): """ Method to post-process the dataObjects - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects + @ In, inputDic, dict, dictionaries which contains the data inside the input DataObjects + inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with + DataDict is a dictionary that has the format + dataDict['dims'] = dict {varName:independentDimensions} + dataDict['metadata'] = dict {metaVarName:metaVarValue} + dataDict['type'] = str TypeOfDataObject + dataDict['inpVars'] = list of input variables + dataDict['outVars'] = list of output variables + dataDict['numberRealizations'] = int SizeOfDataObject + dataDict['name'] = str DataObjectName + dataDict['metaKeys'] = list of meta variables + dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)} @ Out, outputPSDic, dict, output dictionary """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'HistorySetSync Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - else: - inputDic = inputDic[0] - outputDic={} - - newTime = [] - if self.syncMethod == 'grid': - maxEndTime = [] - minInitTime = [] - for hist in inputDic['data'][self.pivotParameter]: - maxEndTime.append(hist[-1]) - minInitTime.append(hist[0]) - maxTime = max(maxEndTime) - minTime = min(minInitTime) - newTime = np.linspace(minTime,maxTime,self.numberOfSamples) - elif self.syncMethod == 'all': - times = [] - for hist in inputDic['data'][self.pivotParameter]: - times.extend(hist) - times = list(set(times)) - times.sort() - newTime = np.array(times) - elif self.syncMethod in ['min','max']: - notableHist = None #set on first iteration - notableLength = None #set on first iteration - - for h,elem in np.ndenumerate(inputDic['data'][self.pivotParameter]): - l=len(elem) - if (h[0] == 0) or (self.syncMethod == 'max' and l > notableLength) or (self.syncMethod == 'min' and l < notableLength): - notableHist = inputDic['data'][self.pivotParameter][h[0]] - notableLength = l - newTime = np.array(notableHist) - - outputDic['data']={} + _, _, inputDic = inputIn['Data'][0] + outputDic={} + + newTime = [] + if
self.syncMethod == 'grid': + maxEndTime = [] + minInitTime = [] + for hist in inputDic['data'][self.pivotParameter]: + maxEndTime.append(hist[-1]) + minInitTime.append(hist[0]) + maxTime = max(maxEndTime) + minTime = min(minInitTime) + newTime = np.linspace(minTime,maxTime,self.numberOfSamples) + elif self.syncMethod == 'all': + times = [] + for hist in inputDic['data'][self.pivotParameter]: + times.extend(hist) + times = list(set(times)) + times.sort() + newTime = np.array(times) + elif self.syncMethod in ['min','max']: + notableHist = None #set on first iteration + notableLength = None #set on first iteration + + for h,elem in np.ndenumerate(inputDic['data'][self.pivotParameter]): + l=len(elem) + if (h[0] == 0) or (self.syncMethod == 'max' and l > notableLength) or (self.syncMethod == 'min' and l < notableLength): + notableHist = inputDic['data'][self.pivotParameter][h[0]] + notableLength = l + newTime = np.array(notableHist) + + outputDic['data']={} + for var in inputDic['outVars']: + outputDic['data'][var] = np.zeros(inputDic['numberRealizations'], dtype=object) + outputDic['data'][self.pivotParameter] = np.zeros(inputDic['numberRealizations'], dtype=object) + + for var in inputDic['inpVars']: + outputDic['data'][var] = copy.deepcopy(inputDic['data'][var]) + + for rlz in range(inputDic['numberRealizations']): + outputDic['data'][self.pivotParameter][rlz] = newTime for var in inputDic['outVars']: - outputDic['data'][var] = np.zeros(inputDic['numberRealizations'], dtype=object) - outputDic['data'][self.pivotParameter] = np.zeros(inputDic['numberRealizations'], dtype=object) - - for var in inputDic['inpVars']: - outputDic['data'][var] = copy.deepcopy(inputDic['data'][var]) - - for rlz in range(inputDic['numberRealizations']): - outputDic['data'][self.pivotParameter][rlz] = newTime - for var in inputDic['outVars']: - oldTime = inputDic['data'][self.pivotParameter][rlz] - outputDic['data'][var][rlz] = self.resampleHist(inputDic['data'][var][rlz], oldTime, newTime) + oldTime = inputDic['data'][self.pivotParameter][rlz] + outputDic['data'][var][rlz] = self.resampleHist(inputDic['data'][var][rlz], oldTime, newTime) - # add meta variables back - for key in inputDic['metaKeys']: - outputDic['data'][key] = inputDic['data'][key] - outputDic['dims'] = copy.deepcopy(inputDic['dims']) + # add meta variables back + for key in inputDic['metaKeys']: + outputDic['data'][key] = inputDic['data'][key] + outputDic['dims'] = copy.deepcopy(inputDic['dims']) - return outputDic + return outputDic def resampleHist(self, variable, oldTime, newTime): """ diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIPP.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIPP.xml index 7ce686ab44..d6d14235d1 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIPP.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIPP.xml @@ -12,7 +12,7 @@ Test the defect issue #739 - + historySampling FirstMRun,PP1,plot1,PP2,plot2,PP3,plot3,PP4,plot4,PP5,plot5 @@ -92,35 +92,30 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HistorySetSampling + uniform 10 time cubic - - HistorySetSampling + firstDerivative 10 time cubic - - HistorySetSampling + secondDerivative 10 time cubic - - HistorySetSampling + filteredFirstDerivative time 200 - - HistorySetSampling + filteredSecondDerivative time 5000 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIntervalAve.xml 
b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIntervalAve.xml index a9e0b2a844..47e9c61b9f 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIntervalAve.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSamplingIntervalAve.xml @@ -9,10 +9,10 @@ Tests of the HistorySetSampling post-processor with intervalAverage interpolation technique - + historySamplingIntervalAve - readIn,pp,plot + readIn,pp,plot 1 @@ -23,7 +23,7 @@ windTypDataFile - windTypData + windTypData windTypData @@ -37,17 +37,16 @@ plot_interval_average - + - - HistorySetSampling + uniform 24 Time intervalAverage - + csv @@ -76,7 +75,7 @@ - + x @@ -84,7 +83,7 @@ Time - + x Time,Speed diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSampling_and_sync.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSampling_and_sync.xml index cbcee12621..6aaeee506e 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSampling_and_sync.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSampling_and_sync.xml @@ -13,7 +13,7 @@ Created test to show how to perform the sync - + historySetSampling_and_sync FirstMRun,PP1,sync1 @@ -45,15 +45,13 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HistorySetSampling + uniform 10 time cubic - - HistorySetSync + all time extended diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSync.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSync.xml index 43fb5c08fd..5a0de976b4 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSync.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSync.xml @@ -9,7 +9,7 @@ Tests of the HistorySetSync interfaced post-processor - + HistorySetSync FirstMRun,PP1,plot1,PP2,plot2 @@ -56,15 +56,13 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HistorySetSync + 20 time zeroed grid - - HistorySetSync + grid 20 time diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncAll.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncAll.xml index 818863d9e7..76903ff1cd 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncAll.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncAll.xml @@ -84,8 +84,7 @@ python - - HistorySetSync + all t extended diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMax.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMax.xml index c969291cfb..9f1863bc4a 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMax.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMax.xml @@ -84,8 +84,7 @@ python - - HistorySetSync + max t extended diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMin.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMin.xml index 333ea7a0f0..edf4edbfd8 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMin.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSyncMin.xml @@ -84,8 +84,7 @@ python - - HistorySetSync + min t extended From 9747899426e2ef94708b7e4580810e99706ddefd Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 15:41:50 
-0600 Subject: [PATCH 34/51] convert historySetSnapShot --- .../HistorySetSnapShot.py | 192 +++++++++--------- .../PostProcessorFunctions/HistorySetSync.py | 9 +- .../test_historySetSnapshot.xml | 19 +- .../test_historySetSnapshot_mixed.xml | 8 +- 4 files changed, 119 insertions(+), 109 deletions(-) diff --git a/framework/PostProcessorFunctions/HistorySetSnapShot.py b/framework/PostProcessorFunctions/HistorySetSnapShot.py index 9b226d4b85..89d9dae945 100644 --- a/framework/PostProcessorFunctions/HistorySetSnapShot.py +++ b/framework/PostProcessorFunctions/HistorySetSnapShot.py @@ -15,21 +15,17 @@ Created on October 28, 2015 """ - -from __future__ import division, print_function, unicode_literals, absolute_import - -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP - import os import numpy as np from scipy import interpolate import copy import importlib +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase import HistorySetSync as HSS from utils import InputData, InputTypes -class HistorySetSnapShot(PostProcessorInterfaceBase): +class HistorySetSnapShot(PostProcessorPluginBase): """ This Post-Processor performs the conversion from HistorySet to PointSet The conversion is made so that each history H is converted to a single point P. @@ -48,7 +44,6 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("HistorySetSnapShot")) HSSSTypeType = InputTypes.makeEnumType("HSSSType", "HSSSTypeType", ['min','max','average','value','timeSlice','mixed']) inputSpecification.addSub(InputData.parameterInputFactory("type", contentType=HSSSTypeType)) inputSpecification.addSub(InputData.parameterInputFactory("numberOfSamples", contentType=InputTypes.IntegerType)) @@ -65,40 +60,60 @@ class cls. valueSub.addParam("pivotVar", InputTypes.StringType) valueSub.addParam("pivotVal", InputTypes.StringType) inputSpecification.addSub(valueSub) - #Should method be in super class? - inputSpecification.addSub(InputData.parameterInputFactory("method", contentType=InputTypes.StringType)) return inputSpecification - def initialize(self): + def __init__(self): """ - Method to initialize the Interfaced Post-processor - @ In, None, - @ Out, None, + Constructor + @ In, None + @ Out, None """ - - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'HistorySet' - self.outputFormat = 'PointSet' - + super().__init__() + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicates multiple realizations are returned + self.validDataType = ['PointSet'] # The list of accepted types of DataObject self.type = None - self.pivotParameter = None + self.pivotParameter = None #pivotParameter identifies the ID of the temporal variable self.pivotVar = None self.pivotVal = None self.timeInstant = None - self.numberOfSamples = None - self.pivotParameter = None self.interpolation = None - self.classifiers = {} #for "mixed" mode + def initialize(self, runInfo, inputs, initDict=None): + """ + Method to initialize the HistorySetSnapShot post-processor. + @ In, runInfo, dict, dictionary of run info (e.g.
working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None + """ + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'HistorySet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) + #sync if needed + if self.type == 'timeSlice': + #for syncing, need numberOfSamples, extension + if self.numberOfSamples is None: + self.raiseAnError(IOError,'When using "timeSlice" a "numberOfSamples" must be specified for synchronizing!') + if self.extension is None: + self.raiseAnError(IOError,'When using "timeSlice" an "extension" method must be specified for synchronizing!') + #perform sync + PostProcessorInterfaces = importlib.import_module("PostProcessorInterfaces") + self.HSsyncPP = PostProcessorInterfaces.factory.returnInstance('HistorySetSync') + self.HSsyncPP.setParams(self.numberOfSamples,self.pivotParameter,self.extension,syncMethod='grid') + self.HSsyncPP.initialize(runInfo, inputs, initDict) + def _handleInput(self, paramInput): """ Function to handle the parameter input. @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - for child in paramInput.subparts: tag = child.getName() if tag =='type': @@ -140,7 +155,7 @@ def _handleInput(self, paramInput): self.classifiers[tag].append( (entry,depVar,float(depVal)) ) elif tag != 'method': self.raiseAnError(IOError,'Unrecognized node for HistorySetSnapShot in "mixed" mode:',tag) - elif tag !='method': + else: self.raiseAnError(IOError, 'HistorySetSnapShot Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child.tag) + ' is not recognized') needspivotParameter = ['average','timeSlice'] @@ -148,77 +163,74 @@ def _handleInput(self, paramInput): if self.pivotParameter is None: self.raiseAnError(IOError,'"pivotParameter" is required for',needspivotParameter,'but not provided!') - #sync if needed - if self.type == 'timeSlice': - #for syncing, need numberOfSamples, extension - if self.numberOfSamples is None: - self.raiseIOError(IOError,'When using "timeSlice" a "numberOfSamples" must be specified for synchronizing!') - if self.extension is None: - self.raiseAnError(IOError,'When using "timeSlice" an "extension" method must be specified for synchronizing!') - #perform sync - PostProcessorInterfaces = importlib.import_module("PostProcessorInterfaces") - self.HSsyncPP = PostProcessorInterfaces.factory.returnInstance('HistorySetSync') - self.HSsyncPP.initialize(self.numberOfSamples,self.pivotParameter,self.extension,syncMethod='grid') - - def run(self,inputDic, pivotVal=None): + def run(self,inputIn, pivotVal=None): """ Method to post-process the dataObjects - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionary which contains the data inside the input DataObjects + inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with + DataDict is a dictionary that has the format + dataDict['dims'] = dict {varName:independentDimensions} + dataDict['metadata'] = dict {metaVarName:metaVarValue} + dataDict['type'] = str TypeOfDataObject + dataDict['inpVars'] = list of input variables + dataDict['outVars'] = list of output variables + dataDict['numberRealizations'] = int SizeOfDataObject +
dataDict['name'] = str DataObjectName + dataDict['metaKeys'] = list of meta variables + dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)} @ In, pivotVal, float, value associated to the variable considered (default None) @ Out, outputPSDic, dict, output dictionary """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'HistorySetSnapShot Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - else: - inputDic = inputDic[0] - #for timeSlice we call historySetWindow - if self.type == 'timeSlice': - outputHSDic = self.HSsyncPP.run([inputDic]) - outDict = historySetWindow(outputHSDic,self.timeInstant,inputDic['inpVars'],inputDic['outVars'],inputDic['numberRealizations'],self.pivotParameter) - for key in inputDic['metaKeys']: - outDict['data'][key] = inputDic['data'][key] - return outDict + _, _, inputDic = inputIn['Data'][0] + #for timeSlice we call historySetWindow + if self.type == 'timeSlice': + outputHSDic = self.HSsyncPP.run(inputIn) + outDict = historySetWindow(outputHSDic,self.timeInstant,inputDic['inpVars'],inputDic['outVars'],inputDic['numberRealizations'],self.pivotParameter) + for key in inputDic['metaKeys']: + outDict['data'][key] = inputDic['data'][key] + return outDict - #for other non-mixed methods we call historySnapShot - elif self.type != 'mixed': - outputPSDic = historySnapShot(inputDic,self.pivotVar,self.type,self.pivotVal,self.pivotParameter) - return outputPSDic - # mixed is more complicated: we pull out values by method instead of a single slice type - # We use the same methods to get slices, then pick out only the requested variables - else: - #establish the output dict - outDict = {'data':{}} - #replicate input space - for var in inputDic['inpVars']: - outDict['data'][var] = inputDic['data'][var] - # replicate metadata - # add meta variables back - for key in inputDic['metaKeys']: - outDict['data'][key] = inputDic['data'][key] - outDict['dims'] = {key:[] for key in inputDic['dims'].keys()} - #loop over the methods requested to fill output space - for method,entries in self.classifiers.items(): - #min, max take no special effort - if method in ['min','max']: - for var in entries: - getDict = historySnapShot(inputDic,var,method) - outDict['data'][var] = getDict['data'][var] - #average requires the pivotParameter - elif method == 'average': - for var in entries: - getDict = historySnapShot(inputDic,var,method,tempID=self.pivotParameter,pivotVal=self.pivotParameter) - outDict['data'][var] = getDict['data'][var] - #timeSlice requires the time value - #functionality removed for now until we recall why it's desirable - #elif method == 'timeSlice': - # for var,time in entries: - # getDict = historySetWindow(inputDic,time,self.pivotParameter) - #value requires the dependent variable and value - elif method == 'value': - for var,depVar,depVal in entries: - getDict = historySnapShot(inputDic,depVar,method,pivotVal=depVal) - outDict['data'][var] = getDict['data'][var] - return outDict + #for other non-mixed methods we call historySnapShot + elif self.type != 'mixed': + outputPSDic = historySnapShot(inputDic,self.pivotVar,self.type,self.pivotVal,self.pivotParameter) + return outputPSDic + # mixed is more complicated: we pull out values by method instead of a single slice type + # We use the same methods to get slices, then pick out only the requested variables + else: + #establish the output dict + outDict = {'data':{}} + #replicate input space + for var in inputDic['inpVars']: + outDict['data'][var] = inputDic['data'][var] + # replicate 
metadata + # add meta variables back + for key in inputDic['metaKeys']: + outDict['data'][key] = inputDic['data'][key] + outDict['dims'] = {key:[] for key in inputDic['dims'].keys()} + #loop over the methods requested to fill output space + for method,entries in self.classifiers.items(): + #min, max take no special effort + if method in ['min','max']: + for var in entries: + getDict = historySnapShot(inputDic,var,method) + outDict['data'][var] = getDict['data'][var] + #average requires the pivotParameter + elif method == 'average': + for var in entries: + getDict = historySnapShot(inputDic,var,method,tempID=self.pivotParameter,pivotVal=self.pivotParameter) + outDict['data'][var] = getDict['data'][var] + #timeSlice requires the time value + #functionality removed for now until we recall why it's desirable + #elif method == 'timeSlice': + # for var,time in entries: + # getDict = historySetWindow(inputDic,time,self.pivotParameter) + #value requires the dependent variable and value + elif method == 'value': + for var,depVar,depVal in entries: + getDict = historySnapShot(inputDic,depVar,method,pivotVal=depVal) + outDict['data'][var] = getDict['data'][var] + return outDict def historySnapShot(inputDic, pivotVar, snapShotType, pivotVal=None, tempID = None): """ @@ -287,8 +299,6 @@ def historySetWindow(inputDic,timeStepID,inpVars,outVars,N,pivotParameter): """ outputDic={'data':{}} outputDic['dims'] = {key:[] for key in inputDic['dims'].keys()} - #outputDic['dims'][pivotParameter]=[] - for var in inpVars: outputDic['data'][var] = inputDic['data'][var] diff --git a/framework/PostProcessorFunctions/HistorySetSync.py b/framework/PostProcessorFunctions/HistorySetSync.py index c4c50c8d00..485cbc875b 100644 --- a/framework/PostProcessorFunctions/HistorySetSync.py +++ b/framework/PostProcessorFunctions/HistorySetSync.py @@ -79,13 +79,20 @@ def initialize(self, runInfo, inputs, initDict=None): if inputs[0].type != 'HistorySet': self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) + def setParams(self, numberOfSamples, pivotParameter, extension, syncMethod): + """ + Method to set the synchronization parameters directly (e.g., when this post-processor is driven by HistorySetSnapShot in 'timeSlice' mode) + @ In, numberOfSamples, int, number of samples of the synchronized grid + @ In, pivotParameter, str, ID of the pivot parameter (e.g., time) + @ In, extension, str, type of extension to be employed ('zeroed' or 'extended') + @ In, syncMethod, str, type of synchronization method ('all', 'grid', 'max' or 'min') + @ Out, None + """ + self.numberOfSamples = numberOfSamples + self.pivotParameter = pivotParameter + self.extension = extension + self.syncMethod = syncMethod + def _handleInput(self, paramInput): """ Function to handle the parameter input. @ In, paramInput, ParameterInput, the already parsed input.
@ Out, None """ - for child in paramInput.subparts: if child.getName() == 'numberOfSamples': self.numberOfSamples = child.value diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot.xml index 713e16605d..e67e734b50 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot.xml @@ -6,10 +6,10 @@ 2016-02-09 InterfacedPostProcessor - Tests of the HistorySetSnapShot interfaced post-processor + Tests of the HistorySetSnapShot interfaced post-processor - + HistorySetSnapShot FirstMRun,PP,PP1,plot1,PP2,plot2,PP3,plot3,PP4,plot4 @@ -84,32 +84,27 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HistorySetSnapShot + timeSlice 10 zeroed time 0 - - HistorySetSnapShot + min x - - HistorySetSnapShot + max x - - HistorySetSnapShot + average x time - - HistorySetSnapShot + value x 8.3 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot_mixed.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot_mixed.xml index 3a9ae5cda4..ed0bd10eb3 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot_mixed.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_historySetSnapshot_mixed.xml @@ -9,7 +9,7 @@ Tests of HistorySetSnapShot and its mixed type of snapshot techniques - + HistorySetSnapShotMixed FirstMRun,snap,snap2 @@ -42,14 +42,12 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - HistorySetSnapShot + mixed x y - - HistorySetSnapShot + mixed x y From 95024cbf4a7bf5dc556278bf7dd6787404238da3 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 16:25:35 -0600 Subject: [PATCH 35/51] update --- framework/PostProcessorFunctions/HistorySetSync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/PostProcessorFunctions/HistorySetSync.py b/framework/PostProcessorFunctions/HistorySetSync.py index 485cbc875b..81e2b673e0 100644 --- a/framework/PostProcessorFunctions/HistorySetSync.py +++ b/framework/PostProcessorFunctions/HistorySetSync.py @@ -115,7 +115,7 @@ def _handleInput(self, paramInput): def run(self,inputIn): """ Method to post-process the dataObjects - @ In, inputDic, dict, dictionaries which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionaries which contains the data inside the input DataObjects inputIn = {'Data':listData, 'Files':listOfFiles}, listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with DataDict is a dictionary that has the format From 69156860705d2749cc3170bd45f9bf161608fbfc Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 16:48:20 -0600 Subject: [PATCH 36/51] convert TypicalHistoryFromHS --- .../TypicalHistoryFromHistorySet.py | 68 ++++++++++--------- .../test_typicalHistoryFromHS.xml | 3 +- 2 files changed, 38 insertions(+), 33 deletions(-) diff --git a/framework/PostProcessorFunctions/TypicalHistoryFromHistorySet.py b/framework/PostProcessorFunctions/TypicalHistoryFromHistorySet.py index bf99406c2f..f7e81f2978 100644 --- a/framework/PostProcessorFunctions/TypicalHistoryFromHistorySet.py +++ b/framework/PostProcessorFunctions/TypicalHistoryFromHistorySet.py @@ -15,16 +15,15 @@ Created on Feb 17, 2016 ''' -from __future__ import division, print_function, unicode_literals, absolute_import - -from PostProcessorInterfaceBaseClass import 
PostProcessorInterfaceBase, CheckInterfacePP import numpy as np import copy from collections import defaultdict from functools import partial + +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import mathUtils, utils, InputData, InputTypes -class TypicalHistoryFromHistorySet(PostProcessorInterfaceBase): +class TypicalHistoryFromHistorySet(PostProcessorPluginBase): """ This class forms a typical history from a history set The methodology can be found at: @@ -41,27 +40,38 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("TypicalHistoryFromHistorySet")) inputSpecification.addSub(InputData.parameterInputFactory("subseqLen", contentType=InputTypes.IntegerListType)) inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType)) inputSpecification.addSub(InputData.parameterInputFactory("outputLen", contentType=InputTypes.FloatType)) - #Should method be in super class? - inputSpecification.addSub(InputData.parameterInputFactory("method", contentType=InputTypes.StringType)) return inputSpecification - def initialize(self): + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicates multiple realizations are returned + self.validDataType = ['HistorySet'] # The list of accepted types of DataObject + self.pivotParameter = 'time' #FIXME this assumes the ARMA model! Dangerous assumption. + self.outputLen = None + + def initialize(self, runInfo, inputs, initDict=None): """ - Method to initialize the Interfaced Post-processor - @ In, None, - @ Out, None, + Method to initialize the TypicalHistoryFromHistorySet post-processor. + @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None """ - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'HistorySet' - self.outputFormat = 'HistorySet' - #if not hasattr(self, 'pivotParameter'): - # self.pivotParameter = 'Time' #FIXME this assumes the ARMA model! Dangerous assumption. - if not hasattr(self, 'outputLen'): - self.outputLen = None + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'HistorySet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) def _handleInput(self, paramInput): """ Function to handle the parameter input. @ In, paramInput, ParameterInput, the already parsed input.
@ Out, None """ - self.name = paramInput.parameterValues['name'] for child in paramInput.subparts: if child.getName() == 'subseqLen': @@ -78,12 +87,10 @@ def _handleInput(self, paramInput): self.pivotParameter = child.value elif child.getName() == 'outputLen': self.outputLen = child.value - # checks if not hasattr(self, 'pivotParameter'): self.raiseAnError(IOError,'"pivotParameter" was not specified for "{}" PostProcessor!'.format(self.name)) - def retrieveHistory(self,dictIn,N): """ Function that returns a dictionary containing the data of history N @@ -96,18 +103,17 @@ def retrieveHistory(self,dictIn,N): outputDict[var]=dictIn[var][N] return outputDict - def run(self,inputDic): + def run(self,inputIn): """ - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionary which contains the data inside the input DataObjects @ Out, outputDic, dict, dictionary which contains the data to be collected by output DataObject """ - if len(inputDic)>1: - self.raiseAnError(IOError, self.__class__.__name__ + ' Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') + inpVars, outVars, inputDic = inputIn['Data'][0] + numSamples = inputDic['numberRealizations'] + inputDict = inputDic['data'] -#get actual data - inputDict = inputDic[0]['data'] #identify features - self.features = inputDic[0]['outVars'] + self.features = outVars #don't keep the pivot parameter in the feature space if self.pivotParameter in self.features: self.features.remove(self.pivotParameter) @@ -119,7 +125,7 @@ def run(self,inputDic): ## Check if data is synchronized referenceHistory = 0 referenceTimeAxis = inputDict[self.pivotParameter][referenceHistory] - for hist in range(inputDic[0]['numberRealizations']): + for hist in range(numSamples): if str(inputDict[self.pivotParameter][hist]) != str(referenceTimeAxis): errorMessage = '{} Interfaced Post-Processor "{}": one or more histories in the historySet have different time scales (e.g., reference points: {} and {})'.format(self.__class__.__name__, self.name,referenceHistory, hist) self.raiseAnError(IOError, errorMessage) @@ -128,7 +134,7 @@ def run(self,inputDic): #data dictionaries have form {historyNumber:{VarName:[data], VarName:[data]}} reshapedData = {} newHistoryCounter = 0 #new history tracking labels - for historyNumber in range(inputDic[0]['numberRealizations']): + for historyNumber in range(numSamples): #array of the pivot values provided in the history pivotValues = np.asarray(inputDict[self.pivotParameter][historyNumber]) #if the desired output pivot value length is (equal to or) longer than the provided history ...
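# --- Illustrative sketch (not part of this patch): every post-processor converted in this
# --- series declares setInputDataType('dict'), so its run() receives the structure described
# --- in the docstrings above. Assuming that structure, a minimal run() skeleton unpacks it
# --- as follows (the variable names below are hypothetical):
#
#   def run(self, inputIn):
#     inpVars, outVars, inputDic = inputIn['Data'][0]      # single input DataObject
#     numSamples = inputDic['numberRealizations']          # realization count
#     pivotArrays = inputDic['data'][self.pivotParameter]  # per-history pivot (time) arrays
#     outputDic = {'data': {}, 'dims': {}}
#     for var in inputDic['inpVars']:                      # pass the input space through
#       outputDic['data'][var] = inputDic['data'][var]
#     return outputDic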
@@ -269,7 +275,7 @@ def run(self,inputDic): outputDict['data'][var] = np.zeros(1, dtype=object) outputDict['data'][var][0] = typicalData[var] # preserve input data - for var in inputDic[0]['inpVars']: + for var in inputDic['inpVars']: outputDict['data'][var] = np.zeros(1, dtype=object) outputDict['data'][var][0] = inputDict[var][0] outputDict['dims']={} diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_typicalHistoryFromHS.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_typicalHistoryFromHS.xml index fae6c55579..6ff1bf124c 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_typicalHistoryFromHS.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_typicalHistoryFromHS.xml @@ -25,8 +25,7 @@ - - TypicalHistoryFromHistorySet + 3600 21600 Time From 4b09ac225e528bfb21e843e8c741b96f40b7f1cd Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 19:19:51 -0600 Subject: [PATCH 37/51] convert the rest of interface pp --- .../dataObjectLabelFilter.py | 113 ++++++++---------- .../testInterfacedPP.py | 67 ++++++----- .../testInterfacedPP_PointSet.py | 65 +++++----- .../test_interfacedPP.xml | 5 +- .../test_interfacedPP_parallel.xml | 5 +- .../test_interfacedPP_pointset.xml | 5 +- .../test_metadata_usage_in_interfacePP.xml | 3 +- 7 files changed, 133 insertions(+), 130 deletions(-) diff --git a/framework/PostProcessorFunctions/dataObjectLabelFilter.py b/framework/PostProcessorFunctions/dataObjectLabelFilter.py index 922915d10c..e8da63d4eb 100644 --- a/framework/PostProcessorFunctions/dataObjectLabelFilter.py +++ b/framework/PostProcessorFunctions/dataObjectLabelFilter.py @@ -15,17 +15,15 @@ Created on October 28, 2015 """ - -from __future__ import division, print_function, unicode_literals, absolute_import -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP import os import numpy as np from scipy import interpolate import copy +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes -class dataObjectLabelFilter(PostProcessorInterfaceBase): +class dataObjectLabelFilter(PostProcessorPluginBase): """ This Post-Processor filters out the points or histories according to a chosen clustering label """ @@ -39,27 +37,21 @@ class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("dataObjectLabelFilter")) - DOLFDataTypeType = InputTypes.makeEnumType("DOLFDataType", "DOLFDataTypeType", ['HistorySet','PointSet']) - inputSpecification.addSubSimple("dataType", DOLFDataTypeType) inputSpecification.addSubSimple("label", InputTypes.StringType) inputSpecification.addSubSimple("clusterIDs", InputTypes.IntegerListType) - #Should method be in super class?
- inputSpecification.addSubSimple("method", contentType=InputTypes.StringType) return inputSpecification - def initialize(self): + def __init__(self): """ - Method to initialize the Interfaced Post-processor - @ In, None, - @ Out, None, - + Constructor + @ In, None + @ Out, None """ - - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = None - self.outputFormat = None - + super().__init__() + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicate multiple realizations are returned + self.validDataType = ['HistorySet','PointSet'] # The list of accepted types of DataObject self.label = None self.clusterIDs = [] @@ -69,58 +61,59 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ - for child in paramInput.subparts: - if child.getName() == 'dataType': - dataType = child.value - if dataType in set(['HistorySet','PointSet']): - self.inputFormat = dataType - self.outputFormat = dataType - else: - self.raiseAnError(IOError, 'dataObjectLabelFilter Interfaced Post-Processor ' + str(self.name) + ' : dataType ' + str(dataType) + ' is not recognized (available are HistorySet, PointSet)') - elif child.getName() == 'label': + if child.getName() == 'label': self.label = child.value elif child.getName() == 'clusterIDs': for clusterID in child.value: self.clusterIDs.append(clusterID) - elif child.getName() !='method': - self.raiseAnError(IOError, 'dataObjectLabelFilter Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized') + else: + self.raiseAnError(IOError, 'Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized') - def run(self,inputDic): + def run(self,inputIn): """ Method to post-process the dataObjects - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects + @ In, inputIn, dict, dictionaries which contains the data inside the input DataObjects + inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with + DataDict is a dictionary that has the format + dataDict['dims'] = dict {varName:independentDimensions} + dataDict['metadata'] = dict {metaVarName:metaVarValue} + dataDict['type'] = str TypeOfDataObject + dataDict['inpVars'] = list of input variables + dataDict['outVars'] = list of output variables + dataDict['numberRealization'] = int SizeOfDataObject + dataDict['name'] = str DataObjectName + dataDict['metaKeys'] = list of meta variables + dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)} @ Out, outputDic, dictionary, output dictionary to be provided to the base class """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'HistorySetSync Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') + _, _, inputDict = inputIn['Data'][0] + outputDict = {} + outputDict['data'] ={} + outputDict['dims'] = {} + outputDict['metadata'] = copy.deepcopy(inputDict['metadata']) if 'metadata' in inputDict.keys() else {} + labelType = type(inputDict['data'][self.label][0]) + if labelType != np.ndarray: + indexes = np.where(np.in1d(inputDict['data'][self.label],self.clusterIDs))[0] + for key in inputDict['data'].keys(): + outputDict['data'][key] = inputDict['data'][key][indexes] + outputDict['dims'][key] = [] else: - inputDict = inputDic[0] - outputDict = {} - outputDict['data'] ={} - outputDict['dims'] = {} - outputDict['metadata'] = 
copy.deepcopy(inputDict['metadata']) if 'metadata' in inputDict.keys() else {} + labelType = type(inputDict['data'][self.label][0]) + if labelType != np.ndarray: + indexes = np.where(np.in1d(inputDict['data'][self.label],self.clusterIDs))[0] + for key in inputDict['data'].keys(): + outputDict['data'][key] = inputDict['data'][key][indexes] + outputDict['dims'][key] = [] else: - inputDict = inputDic[0] - outputDict = {} - outputDict['data'] ={} - outputDict['dims'] = {} - outputDict['metadata'] = copy.deepcopy(inputDict['metadata']) if 'metadata' in inputDict.keys() else {} - labelType = type(inputDict['data'][self.label][0]) - if labelType != np.ndarray: - indexes = np.where(np.in1d(inputDict['data'][self.label],self.clusterIDs))[0] - for key in inputDict['data'].keys(): - outputDict['data'][key] = inputDict['data'][key][indexes] + for key in inputDict['data'].keys(): + if type(inputDict['data'][key][0]) == np.ndarray: + temp = [] + for cnt in range(len(inputDict['data'][self.label])): + indexes = np.where(np.in1d(inputDict['data'][self.label][cnt],self.clusterIDs))[0] + if len(indexes) > 0: + temp.append(copy.deepcopy(inputDict['data'][key][cnt][indexes])) + outputDict['data'][key] = np.asanyarray(temp) outputDict['dims'][key] = [] - else: - for key in inputDict['data'].keys(): - if type(inputDict['data'][key][0]) == np.ndarray: - temp = [] - for cnt in range(len(inputDict['data'][self.label])): - indexes = np.where(np.in1d(inputDict['data'][self.label][cnt],self.clusterIDs))[0] - if len(indexes) > 0: - temp.append(copy.deepcopy(inputDict['data'][key][cnt][indexes])) - outputDict['data'][key] = np.asanyarray(temp) + else: + outputDict['data'][key] = np.empty(0) + for cnt in range(len(inputDict['data'][self.label])): + indexes = np.where(np.in1d(inputDict['data'][self.label][cnt],self.clusterIDs))[0] + if len(indexes) > 0: + outputDict['data'][key] = np.append(outputDict['data'][key], copy.deepcopy(inputDict['data'][key][cnt])) outputDict['dims'][key] = [] - else: - outputDict['data'][key] = np.empty(0) - for cnt in range(len(inputDict['data'][self.label])): - indexes = np.where(np.in1d(inputDict['data'][self.label][cnt],self.clusterIDs))[0] - if len(indexes) > 0: - outputDict['data'][key] = np.append(outputDict['data'][key], copy.deepcopy(inputDict['data'][key][cnt])) outputDict['dims'][key] = [] return outputDict diff --git a/framework/PostProcessorFunctions/testInterfacedPP.py b/framework/PostProcessorFunctions/testInterfacedPP.py index d5ef5e7b3d..cc5f4d02eb 100644 --- a/framework/PostProcessorFunctions/testInterfacedPP.py +++ b/framework/PostProcessorFunctions/testInterfacedPP.py @@ -15,16 +15,13 @@ Created on December 1, 2015 ''' -from __future__ import division, print_function, unicode_literals, absolute_import - import copy -import itertools import numpy as np -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes -class testInterfacedPP(PostProcessorInterfaceBase): +class testInterfacedPP(PostProcessorPluginBase): """ This class represents the most basic interfaced post-processor This class inherits from the base class PostProcessorPluginBase and it contains the three methods that need to be implemented: - initialize - run - _handleInput """ @classmethod def getInputSpecification(cls): """ Method to get a reference to a class that specifies the input data for class cls. @ In, cls, the class for which we are retrieving the specification @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls.
""" inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("testInterfacedPP")) inputSpecification.addSubSimple("xmlNodeExample", InputTypes.StringType) - inputSpecification.addSubSimple("method", InputTypes.StringType) return inputSpecification - def initialize(self): + def __init__(self): """ - Method to initialize the Interfaced Post-processor - @ In, None, - @ Out, None, + Constructor + @ In, None + @ Out, None """ - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'HistorySet' - self.outputFormat = 'HistorySet' + super().__init__() + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicate multiple realizations are returned + self.validDataType = ['HistorySet'] # The list of accepted types of DataObject - def run(self,inputDic): + def initialize(self, runInfo, inputs, initDict=None): + """ + Method to initialize the DataClassifier post-processor. + @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None + """ + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'HistorySet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) + + def run(self,inputIn): """ This method is transparent: it passes the inputDic directly as output - @ In, inputDic, dict, dictionary which contains the data inside the input DataObject - @ Out, inputDic, dict, same inputDic dictionary + @ In, inputIn, dict, dictionary which contains the data inside the input DataObject + @ Out, outputDict, dict, the output dictionary, passing through HistorySet info """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'testInterfacedPP_PointSet Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - else: - inputDict = inputDic[0] - outputDict = {'data':{}} - outputDict['dims'] = copy.deepcopy(inputDict['dims']) - for key in inputDict['data'].keys(): - outputDict['data'][key] = copy.deepcopy(inputDict['data'][key]) + _, _, inputDict = inputIn['Data'][0] + outputDict = {'data':{}} + outputDict['dims'] = copy.deepcopy(inputDict['dims']) + for key in inputDict['data'].keys(): + outputDict['data'][key] = copy.deepcopy(inputDict['data'][key]) - # add meta variables back - for key in inputDict['metaKeys']: - outputDict['data'][key] = inputDict['data'][key] - return outputDict + # add meta variables back + for key in inputDict['metaKeys']: + outputDict['data'][key] = inputDict['data'][key] + return outputDict def _handleInput(self, paramInput): """ @@ -81,7 +89,6 @@ def _handleInput(self, paramInput): @ In, paramInput, ParameterInput, the already parsed input. 
@ Out, None """ - for child in paramInput.subparts: if child.getName() == 'xmlNodeExample': self.xmlNodeExample = child.value diff --git a/framework/PostProcessorFunctions/testInterfacedPP_PointSet.py b/framework/PostProcessorFunctions/testInterfacedPP_PointSet.py index fd2a8026fa..918bace454 100644 --- a/framework/PostProcessorFunctions/testInterfacedPP_PointSet.py +++ b/framework/PostProcessorFunctions/testInterfacedPP_PointSet.py @@ -15,13 +15,11 @@ Created on December 1, 2015 ''' -from __future__ import division, print_function, unicode_literals, absolute_import - import copy -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes -class testInterfacedPP_PointSet(PostProcessorInterfaceBase): +class testInterfacedPP_PointSet(PostProcessorPluginBase): """ This class represents the most basic interfaced post-processor This class inherits from the base class PostProcessorPluginBase and it contains the three methods that need to be implemented: - initialize - run - _handleInput """ @classmethod def getInputSpecification(cls): """ Method to get a reference to a class that specifies the input data for class cls. specifying input of cls. """ inputSpecification = super().getInputSpecification() - inputSpecification.setCheckClass(CheckInterfacePP("testInterfacedPP_PointSet")) inputSpecification.addSubSimple("xmlNodeExample", InputTypes.StringType) - inputSpecification.addSubSimple("method", InputTypes.StringType) return inputSpecification - def initialize(self): + def __init__(self): + """ + Constructor + @ In, None + @ Out, None """ - Method to initialize the Interfaced Post-processor - @ In, None, - @ Out, None, + super().__init__() + self.setInputDataType('dict') + self.keepInputMeta(True) + self.outputMultipleRealizations = True # True indicates multiple realizations are returned + self.validDataType = ['PointSet'] # The list of accepted types of DataObject + def initialize(self, runInfo, inputs, initDict=None): """ - PostProcessorInterfaceBase.initialize(self) - self.inputFormat = 'PointSet' - self.outputFormat = 'PointSet' + Method to initialize the testInterfacedPP_PointSet post-processor. + @ In, runInfo, dict, dictionary of run info (e.g.
working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None + """ + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'PointSet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only PointSet dataObject, but got "{}"'.format(inputs[0].type)) - def run(self,inputDic): + def run(self,inputIn): """ This method is transparent: it passes the inputDic directly as output - @ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects - @ Out, inputDic, dict, same inputDic dictionary + @ In, inputIn, dict, dictionary which contains the data inside the input DataObjects + @ Out, outputDict, dict, the output dictionary, passing through PointSet info """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'testInterfacedPP_PointSet Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - else: - inputDict = inputDic[0] - outputDict = {'data':{}} - outputDict['dims'] = copy.deepcopy(inputDict['dims']) - for key in inputDict['data'].keys(): - outputDict['data'][key] = copy.deepcopy(inputDict['data'][key]) - # add meta variables back - for key in inputDict['metaKeys']: - outputDict['data'][key] = inputDict['data'][key] - return outputDict + _, _, inputDict = inputIn['Data'][0] + outputDict = {'data':{}} + outputDict['dims'] = copy.deepcopy(inputDict['dims']) + for key in inputDict['data'].keys(): + outputDict['data'][key] = copy.deepcopy(inputDict['data'][key]) + # add meta variables back + for key in inputDict['metaKeys']: + outputDict['data'][key] = inputDict['data'][key] + return outputDict def _handleInput(self, paramInput): """ Function to handle the parameter input. @ In, paramInput, ParameterInput, the already parsed input.
@ Out, None """ - for child in paramInput.subparts: if child.getName() == 'xmlNodeExample': self.xmlNodeExample = child.value diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP.xml index 413923e043..90f66c8ce7 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP.xml @@ -9,7 +9,7 @@ Test of the interfaced post-processor interface - + interfacedPostProcessor FirstMRun,PP @@ -41,8 +41,7 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - testInterfacedPP + 9 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_parallel.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_parallel.xml index 946ef13bb7..692ed90af3 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_parallel.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_parallel.xml @@ -9,7 +9,7 @@ Test of the interfaced post-processor interface performed in parallel - + interfacedPostProcessorParallel FirstMRun,PP @@ -42,8 +42,7 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - testInterfacedPP + 9 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_pointset.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_pointset.xml index 51101e2f51..865201eca6 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_pointset.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_interfacedPP_pointset.xml @@ -9,7 +9,7 @@ Test of the interfaced post-processor interface to deal with pointSet - + interfacedPostProcessor_PointSet FirstMRun,PP @@ -40,8 +40,7 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - - testInterfacedPP_PointSet + 9 diff --git a/tests/framework/PostProcessors/InterfacedPostProcessor/test_metadata_usage_in_interfacePP.xml b/tests/framework/PostProcessors/InterfacedPostProcessor/test_metadata_usage_in_interfacePP.xml index 77e353492a..49b5b9bf5b 100644 --- a/tests/framework/PostProcessors/InterfacedPostProcessor/test_metadata_usage_in_interfacePP.xml +++ b/tests/framework/PostProcessors/InterfacedPostProcessor/test_metadata_usage_in_interfacePP.xml @@ -32,8 +32,7 @@ - - HistorySetSync + all time extended From 1893901fd776f16b39bb9889ecb93745b9d3e88a Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 19:45:13 -0600 Subject: [PATCH 38/51] remove interface pp related classes --- framework/Models/PostProcessors/Factory.py | 11 +- framework/Models/PostProcessors/HS2PS.py | 1 + .../Models/PostProcessors/ImportanceRank.py | 1 - .../PostProcessors/InterfacedPostProcessor.py | 183 -------------- .../PostProcessorFunctions/HStoPSOperator.py | 0 .../HistorySetSampling.py | 0 .../HistorySetSnapShot.py | 7 +- .../PostProcessorFunctions/HistorySetSync.py | 1 - .../TypicalHistoryFromHistorySet.py | 0 .../dataObjectLabelFilter.py | 0 .../testInterfacedPP.py | 0 .../testInterfacedPP_PointSet.py | 0 framework/PostProcessorInterfaceBaseClass.py | 230 ------------------ framework/PostProcessorInterfaces.py | 53 ---- 14 files changed, 15 insertions(+), 472 deletions(-) delete mode 100644 framework/Models/PostProcessors/InterfacedPostProcessor.py rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/HStoPSOperator.py (100%) rename framework/{ => 
Models/PostProcessors}/PostProcessorFunctions/HistorySetSampling.py (100%) rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/HistorySetSnapShot.py (98%) rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/HistorySetSync.py (99%) rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/TypicalHistoryFromHistorySet.py (100%) rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/dataObjectLabelFilter.py (100%) rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/testInterfacedPP.py (100%) rename framework/{ => Models/PostProcessors}/PostProcessorFunctions/testInterfacedPP_PointSet.py (100%) delete mode 100644 framework/PostProcessorInterfaceBaseClass.py delete mode 100644 framework/PostProcessorInterfaces.py diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py index 35cac83e8b..ed5b79ac6b 100644 --- a/framework/Models/PostProcessors/Factory.py +++ b/framework/Models/PostProcessors/Factory.py @@ -31,7 +31,6 @@ from .LimitSurfaceIntegral import LimitSurfaceIntegral from .FastFourierTransform import FastFourierTransform from .ExternalPostProcessor import ExternalPostProcessor -from .InterfacedPostProcessor import InterfacedPostProcessor from .TopologicalDecomposition import TopologicalDecomposition from .DataClassifier import DataClassifier from .ComparisonStatisticsModule import ComparisonStatistics @@ -43,6 +42,16 @@ from .Validation import Validation from .HistorySetDelay import HistorySetDelay from .HS2PS import HS2PS + +### PostProcessorFunctions (orig: InterfacedPostProcessor) +from .PostProcessorFunctions.HStoPSOperator import HStoPSOperator +from .PostProcessorFunctions.HistorySetSampling import HistorySetSampling +from .PostProcessorFunctions.HistorySetSnapShot import HistorySetSnapShot +from .PostProcessorFunctions.HistorySetSync import HistorySetSync +from .PostProcessorFunctions.TypicalHistoryFromHistorySet import TypicalHistoryFromHistorySet +from .PostProcessorFunctions.dataObjectLabelFilter import dataObjectLabelFilter +from .PostProcessorFunctions.testInterfacedPP import testInterfacedPP +from .PostProcessorFunctions.testInterfacedPP_PointSet import testInterfacedPP_PointSet ## These utilize the optional prequisite library PySide, so don't error if they ## do not import appropriately. 
try: diff --git a/framework/Models/PostProcessors/HS2PS.py b/framework/Models/PostProcessors/HS2PS.py index 1d6f8a9618..3e85598a5b 100644 --- a/framework/Models/PostProcessors/HS2PS.py +++ b/framework/Models/PostProcessors/HS2PS.py @@ -62,6 +62,7 @@ def __init__(self): self.outputMultipleRealizations = True self.pivotParameter = None self.features = 'all' + self.transformationSettings = {} self.setInputDataType('xrDataset') self.keepInputMeta(True) diff --git a/framework/Models/PostProcessors/ImportanceRank.py b/framework/Models/PostProcessors/ImportanceRank.py index c83c6fc3ad..a8fdbe3e63 100644 --- a/framework/Models/PostProcessors/ImportanceRank.py +++ b/framework/Models/PostProcessors/ImportanceRank.py @@ -27,7 +27,6 @@ from .PostProcessorInterface import PostProcessorInterface from utils import InputData, InputTypes import Files -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase #Internal Modules End-------------------------------------------------------------------------------- class ImportanceRank(PostProcessorInterface): diff --git a/framework/Models/PostProcessors/InterfacedPostProcessor.py b/framework/Models/PostProcessors/InterfacedPostProcessor.py deleted file mode 100644 index b0ebc7e766..0000000000 --- a/framework/Models/PostProcessors/InterfacedPostProcessor.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on July 10, 2013 - -@author: alfoa -""" -import importlib - -from .PostProcessorInterface import PostProcessorInterface -from utils import InputData -from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase - -class InterfacedPostProcessor(PostProcessorInterface): - """ - This class allows to interface a general-purpose post-processor created ad-hoc by the user. - While the ExternalPostProcessor is designed for analysis-dependent cases, the InterfacedPostProcessor is designed more generic cases - The InterfacedPostProcessor parses (see PostProcessorInterfaces.py) and uses only the functions contained in the raven/framework/PostProcessorFunctions folder - The base class for the InterfacedPostProcessor that the user has to inherit to develop its own InterfacedPostProcessor is specified - in PostProcessorInterfaceBase.py - """ - - PostProcessorInterfaces = importlib.import_module("PostProcessorInterfaces") - - @classmethod - def getInputSpecification(cls): - """ - Method to get a reference to a class that specifies the input data for - class cls. - @ In, cls, the class for which we are retrieving the specification - @ Out, inputSpecification, InputData.ParameterInput, class to use for - specifying input of cls. 
- """ - ## This will replace the lines above - inputSpecification = super().getInputSpecification() - return inputSpecification - - def __init__(self): - """ - Constructor - @ In, None - @ Out, None - """ - super().__init__() - self.methodToRun = None - ## Currently, we have used both DataObject.addRealization and DataObject.load to - ## collect the PostProcessor returned outputs. DataObject.addRealization is used to - ## collect single realization, while DataObject.load is used to collect multiple realizations - ## However, the DataObject.load can not be directly used to collect single realization - self.outputMultipleRealizations = True - - def initialize(self, runInfo, inputs, initDict): - """ - Method to initialize the Interfaced processor - @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) - @ In, inputs, list, list of inputs - @ In, initDict, dict, dictionary with initialization options - @ Out, None - """ - super().initialize(runInfo, inputs, initDict) - - inputObj = inputs[-1] if type(inputs) == list else inputs - metaKeys = inputObj.getVars('meta') - self.addMetaKeys(metaKeys) - - def _localReadMoreXML(self, xmlNode): - """ - Function to read the portion of the xml input that belongs to this specialized class - and initialize some stuff based on the inputs got - @ In, xmlNode, xml.etree.Element, Xml element node - @ Out, None - """ - - # paramInput = InterfacedPostProcessor.getInputSpecification()() - # paramInput.parseNode(xmlNode) - - interfaceClasses = [c.getInputSpecification() for c in InterfacedPostProcessor.PostProcessorInterfaces.interfaceClasses()] - paramInput = InputData.parseFromList(xmlNode, interfaceClasses) - - self.methodToRun = paramInput.getName() - self.postProcessor = InterfacedPostProcessor.PostProcessorInterfaces.factory.returnInstance(self.methodToRun) - if not isinstance(self.postProcessor,PostProcessorInterfaceBase): - self.raiseAnError(IOError, 'InterfacedPostProcessor Post-Processor '+ self.name + - ' : not correctly coded; it must inherit the PostProcessorInterfaceBase class') - - self.postProcessor.initialize() - self.postProcessor._handleInput(paramInput) - if not set(self.returnFormat("input").split("|")) <= set(['HistorySet','PointSet']): - self.raiseAnError(IOError,'InterfacedPostProcessor Post-Processor '+ self.name + - ' : self.inputFormat not correctly initialized') - if not set(self.returnFormat("output").split("|")) <= set(['HistorySet','PointSet']): - self.raiseAnError(IOError,'InterfacedPostProcessor Post-Processor '+ self.name + - ' : self.outputFormat not correctly initialized') - - def run(self, inputIn): - """ - This method executes the interfaced post-processor action. - @ In, inputIn, dict, dictionary of data to process - @ Out, outputDic, dict, dict containing the post-processed results - """ - #FIXME THIS IS NOT CORRECT!!!! - try: - inputTypes = set([inp.type for inp in inputIn]) - check=True - except AttributeError: - check=False - if check: - for inp in inputIn: - if not inputTypes <= set(self.returnFormat("input").split("|")): - self.raiseAnError(IOError,'InterfacedPostProcessor Post-Processor named "'+ self.name + - '" : The input object "'+ inp.name +'" provided is of the wrong type. 
Got "'+ - inp.type + '" but expected "'+self.returnFormat("input") + '"!') - - inputDic= self.inputToInternal(inputIn) - self.raiseADebug('InterfacedPostProcessor Post-Processor '+ self.name +' : start to run') - outputDic = self.postProcessor.run(inputDic) - return outputDic - - def _inverse(self, inputIn): - outputDic = self.postProcessor._inverse(inputIn) - return outputDic - - def inputToInternal(self,inputs): - """ - Function to convert the received input into a format this object can - understand - @ In, input, list, list of dataObjects handed to the post-processor - @ Out, inputDict, list, list of dictionaries this object can process - """ - inputDict = [] - for inp in inputs: - if type(inp) == dict: - return [inp] - else: - self.metaKeys = inp.getVars('meta') - inputDictTemp = {} - inputDictTemp['inpVars'] = inp.getVars('input') - inputDictTemp['outVars'] = inp.getVars('output') - inputDictTemp['data'] = inp.asDataset(outType='dict')['data'] - inputDictTemp['dims'] = inp.getDimensions('output') - inputDictTemp['type'] = inp.type - inputDictTemp['metaKeys'] = self.metaKeys - inputDictTemp['numberRealizations'] = len(inp) - for key in self.metaKeys: - try: - inputDictTemp['data'][key] = inp.getMeta(pointwise=True,general=True)[key].values - except: - self.raiseADebug('The following key: ' + str(key) + ' has not passed to the Interfaced PP') - inputDictTemp['name'] = inp.name - inputDict.append(inputDictTemp) - return inputDict - - def returnFormat(self,location): - """ - Function that returns the format of either input or output - @ In, location, str, list of dataObjects handed to the post-processor - @ Out, form, str, format of either input or output - """ - if location == 'input': - form = self.postProcessor.inputFormat - elif location == 'output': - form = self.postProcessor.outputFormat - return form - - def collectOutput(self, finishedJob, output): - """ - Function to place all of the computed data into the output object, (DataObjects) - @ In, finishedJob, object, JobHandler object that is in charge of running this PostProcessor - @ In, output, object, the object where we want to place our computed results - @ Out, None - """ - super().collectOutput(finishedJob, output) diff --git a/framework/PostProcessorFunctions/HStoPSOperator.py b/framework/Models/PostProcessors/PostProcessorFunctions/HStoPSOperator.py similarity index 100% rename from framework/PostProcessorFunctions/HStoPSOperator.py rename to framework/Models/PostProcessors/PostProcessorFunctions/HStoPSOperator.py diff --git a/framework/PostProcessorFunctions/HistorySetSampling.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSampling.py similarity index 100% rename from framework/PostProcessorFunctions/HistorySetSampling.py rename to framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSampling.py diff --git a/framework/PostProcessorFunctions/HistorySetSnapShot.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py similarity index 98% rename from framework/PostProcessorFunctions/HistorySetSnapShot.py rename to framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py index 89d9dae945..b85dc71618 100644 --- a/framework/PostProcessorFunctions/HistorySetSnapShot.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py @@ -22,7 +22,8 @@ import importlib from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase -import HistorySetSync as HSS +# import HistorySetSync as HSS +from 
Models.PostProcessors import Factory as interfaceFactory from utils import InputData, InputTypes class HistorySetSnapShot(PostProcessorPluginBase): @@ -103,8 +104,8 @@ def initialize(self, runInfo, inputs, initDict=None): if self.extension is None: self.raiseAnError(IOError,'When using "timeSlice" an "extension" method must be specified for synchronizing!') #perform sync - PostProcessorInterfaces = importlib.import_module("PostProcessorInterfaces") - self.HSsyncPP = PostProcessorInterfaces.factory.returnInstance('HistorySetSync') + # PostProcessorInterfaces = importlib.import_module("PostProcessorInterfaces") + self.HSsyncPP = interfaceFactory.factory.returnInstance('HistorySetSync') self.HSsyncPP.setParams(self.numberOfSamples,self.pivotParameter,self.extension,syncMethod='grid') self.HSsyncPP.initialize(runInfo, inputs, initDict) diff --git a/framework/PostProcessorFunctions/HistorySetSync.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py similarity index 99% rename from framework/PostProcessorFunctions/HistorySetSync.py rename to framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py index 81e2b673e0..348f123c8e 100644 --- a/framework/PostProcessorFunctions/HistorySetSync.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py @@ -25,7 +25,6 @@ from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes - class HistorySetSync(PostProcessorPluginBase): """ This Post-Processor performs the conversion from HistorySet to HistorySet diff --git a/framework/PostProcessorFunctions/TypicalHistoryFromHistorySet.py b/framework/Models/PostProcessors/PostProcessorFunctions/TypicalHistoryFromHistorySet.py similarity index 100% rename from framework/PostProcessorFunctions/TypicalHistoryFromHistorySet.py rename to framework/Models/PostProcessors/PostProcessorFunctions/TypicalHistoryFromHistorySet.py diff --git a/framework/PostProcessorFunctions/dataObjectLabelFilter.py b/framework/Models/PostProcessors/PostProcessorFunctions/dataObjectLabelFilter.py similarity index 100% rename from framework/PostProcessorFunctions/dataObjectLabelFilter.py rename to framework/Models/PostProcessors/PostProcessorFunctions/dataObjectLabelFilter.py diff --git a/framework/PostProcessorFunctions/testInterfacedPP.py b/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP.py similarity index 100% rename from framework/PostProcessorFunctions/testInterfacedPP.py rename to framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP.py diff --git a/framework/PostProcessorFunctions/testInterfacedPP_PointSet.py b/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP_PointSet.py similarity index 100% rename from framework/PostProcessorFunctions/testInterfacedPP_PointSet.py rename to framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP_PointSet.py diff --git a/framework/PostProcessorInterfaceBaseClass.py b/framework/PostProcessorInterfaceBaseClass.py deleted file mode 100644 index d58a323abf..0000000000 --- a/framework/PostProcessorInterfaceBaseClass.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on December 1st, 2015 - -""" -#External Modules------------------------------------------------------------------------------------ -import abc -import os -import numpy as np -#External Modules End-------------------------------------------------------------------------------- - -#Internal Modules------------------------------------------------------------------------------------ -from utils.cached_ndarray import c1darray -from utils import utils, InputData -from BaseClasses import MessageUser -from utils import InputData, InputTypes -#Internal Modules End-------------------------------------------------------------------------------- - -class CheckInterfacePP(InputData.CheckClass): - """ - Checks that this is an Interface Post Processor of a given type - """ - def __init__(self, name): - """ - Creates a CheckInterfacePP class - @ In, name, string, the method name - @ Out, None - """ - self.name = name - self.__reason = "" - - def check(self, node): - """ - Checks the node to see if it matches the checkDict - @ In, node, xml node to check - @ Out, bool, true if matches - """ - self.__reason = "" - passed = "subType" in node.attrib and node.attrib["subType"] == "InterfacedPostProcessor" - if not passed: - self.__reason = "subType=InterfacedPostProcessor not in attribs" - methods = node.findall("method") - if len(methods) == 1: - match = methods[0].text == self.name - if not match: - self.__reason += ""+repr(methods[0].text)+ "!=" + self.name + " " - passed = passed and match - else: - self.__reason += "wrong number of method blocks "+str(len(methods))+" " - passed = False - return passed - - def failCheckReason(self, node): - """ - returns a string about why the check failed - @ In, node, xml node to check - @ Out, string, message for user about why check failed. - """ - return self.__reason - -class PostProcessorInterfaceBase(utils.metaclass_insert(abc.ABCMeta,object), MessageUser): - """ - This class is the base interfaced post-processor class - It contains the three methods that need to be implemented: - - initialize - - run - - readMoreXML - """ - - @classmethod - def getInputSpecification(cls): - """ - Method to get a reference to a class that specifies the input data for - class cls. - @ In, cls, the class for which we are retrieving the specification - @ Out, inputSpecification, InputData.ParameterInput, class to use for - specifying input of cls. 
- """ - inputSpecification = InputData.parameterInputFactory(cls.__name__, ordered=False) - inputSpecification.setCheckClass(CheckInterfacePP("PostProcessorInterfaceBaseClass")) - inputSpecification.addParam("subType", InputTypes.StringType) - inputSpecification.addParam("name", InputTypes.StringType) - - return inputSpecification - - def __init__(self): - """ - Constructor - @ Out, None - """ - super().__init__() - self.type = self.__class__.__name__ - self.name = self.__class__.__name__ - - self.transformationSettings = {} # this dictionary is used to store all the setting required to back transform the data into its original format - # it gets filled in the run method and used in the inverse method - - - def initialize(self): - """ - Method to initialize the Interfaced Post-processor. Note that the user needs to specify two mandatory variables: - - self.inputFormat: dataObject that the PP is supposed to receive in input - - self.outputFormat: dataObject that the PP is supposed to generate in output - These two variables check that the input and output dictionaries match what PP is supposed to receive and generate - Refer to the manual on the format of these two dictionaries - @ In, None - @ Out, None - """ - self.inputFormat = None - self.outputFormat = None - - def readMoreXML(self,xmlNode): - """ - Function that reads elements this post-processor will use - @ In, xmlNode, ElementTree, Xml element node - @ Out, None - """ - pass - - def run(self,inputDic): - """ - Method to post-process the dataObjects - @ In, inputDic, dict, dictionary which contains the data inside the input DataObject - @ Out, None - """ - pass - - def _inverse(self,inputDic): - """ - Method to perform the inverse of the post-process action - @ In, inputDic, dict, dictionary which contains the data to be back pre-processed - @ Out, None - """ - pass - - def checkGeneratedDicts(self,outputDic): - """ - Method to check that dictionary generated in def run(self, inputDic) is consistent - @ In, outputDic, dict, dictionary generated by the run method - @ Out, True/False, bool, outcome of the outputDic check - """ - checkInp = self.checkInputFormat(outputDic['data']['input']) - checkOut = self.checkOutputFormat(outputDic['data']['output']) - if checkInp and checkOut: - return True - else: - if not checkInp: - self.raiseAWarning('PP Generation check on Inputs failed!') - if not checkOut: - self.raiseAWarning('PP Generation check on Outputs failed!') - return False - - def checkOutputFormat(self,outputDic): - """ - This method checks that the generated output part of the generated dictionary is built accordingly to outputFormat - @ In, outputDic, dict, dictionary generated by the run method - @ Out, outcome, bool, outcome of the outputDic check (True/False) - """ - outcome = True - if isinstance(outputDic,dict): - if self.outputFormat == 'HistorySet': - for key in outputDic: - if isinstance(outputDic[key],dict): - outcome = outcome and True - else: - self.raiseAWarning('Bad PP output type for key:',key,':',type(outputDic[key]),'; should be dict!') - outcome = False - for keys in outputDic[key]: - if isinstance(outputDic[key][keys],(np.ndarray,c1darray)): - outcome = outcome and True - else: - self.raiseAWarning('Bad PP output type for key:',key,keys,':',type(outputDic[key][keys]),'; should be np.ndarray or c1darray!') - outcome = False - else: # self.outputFormat == 'PointSet': - for key in outputDic: - if isinstance(outputDic[key],(np.ndarray,c1darray)): - outcome = outcome and True - else: - self.raiseAWarning('Bad PP 
output type for key:',key,':',type(outputDic[key]),'; should be np.ndarray or c1darray!') - outcome = False - else: - self.raiseAWarning('Bad PP output dict:',type(outputDic),'is not a dict!') - outcome = False - return outcome - - def checkInputFormat(self,outputDic): - """ - This method checks that the generated input part of the generated dictionary is built accordingly to outputFormat - @ In, outputDic, dict, dictionary generated by the run method - @ Out, outcome, bool, outcome of the outputDic check (True/False) - """ - outcome = True - if isinstance(outputDic,dict): - for key in outputDic: - if isinstance(outputDic[key],(np.ndarray,c1darray)): - outcome = outcome and True - else: - self.raiseAWarning('Bad PP output type for key:',key,':',type(outputDic[key]),'; should be np.ndarray or c1darray!') - outcome = False - else: - self.raiseAWarning('Bad PP output dict:',type(outputDic),'is not a dict!') - outcome = False - return outcome - - def checkArrayMonotonicity(time): - """ - This method checks that an array is increasing monotonically - @ In, time, numpy array, array to be checked - @ Out, outcome, bool, outcome of the monotonicity check - """ - outcome = True - for t in time: - if t != 0: - if time[t] > time[t-1]: - outcome = outcome and True - else: - outcome = outcome and False - return outcome diff --git a/framework/PostProcessorInterfaces.py b/framework/PostProcessorInterfaces.py deleted file mode 100644 index e7b3f27f4e..0000000000 --- a/framework/PostProcessorInterfaces.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Created on December 1, 2015 - -""" -#External Modules------------------------------------------------------------------------------------ -import os -from glob import glob -import inspect -#External Modules End-------------------------------------------------------------------------------- - -#Internal Modules------------------------------------------------------------------------------------ -from EntityFactoryBase import EntityFactory -from utils import utils -#Internal Modules End-------------------------------------------------------------------------------- - -__moduleInterfaceList = [] -startDir = os.path.join(os.path.dirname(__file__),'PostProcessorFunctions') -for dirr,_,_ in os.walk(startDir): - __moduleInterfaceList.extend(glob(os.path.join(dirr,"*.py"))) - utils.add_path(dirr) -__moduleImportedList = [] - - -factory = EntityFactory('InterfacedPostProcessor') -for moduleIndex in range(len(__moduleInterfaceList)): - if 'class' in open(__moduleInterfaceList[moduleIndex]).read(): - __moduleImportedList.append(utils.importFromPath(__moduleInterfaceList[moduleIndex],False)) - for key,modClass in inspect.getmembers(__moduleImportedList[-1], inspect.isclass): - # in this way we can get all the class methods - classMethods = [method for method in dir(modClass) if callable(getattr(modClass, method))] - if 'run' in classMethods: - factory.registerType(key, modClass) - -def interfaceClasses(): - """ - This returns the classes available - @ In, None - @ Out, interfaceClasses, list of classes available - """ - return list(factory._registeredTypes.values()) From b69ba93195d86a277ae9dff0c6fdc77c42f91d81 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 20:06:15 -0600 Subject: [PATCH 39/51] update data mining --- framework/Models/PostProcessors/DataMining.py | 10 +++++----- plugins/HERON | 2 +- plugins/SR2ML | 2 +- plugins/TEAL | 2 +- .../Clustering/test_TD_KMeans_Filtered.xml | 9 +++------ .../Clustering/test_TD_MeanShift.xml | 11 +++-------- 6 files changed, 14 insertions(+), 22 deletions(-) diff --git a/framework/Models/PostProcessors/DataMining.py b/framework/Models/PostProcessors/DataMining.py index 5c795cb6ef..eab4a8a783 100644 --- a/framework/Models/PostProcessors/DataMining.py +++ b/framework/Models/PostProcessors/DataMining.py @@ -320,17 +320,17 @@ def inputToInternalForPreProcessor(self,currentInput): @ Out, inputDict, dict, an input dictionary that this post-processor can process """ inputDict = {'Features': {}, 'parameters': {}, 'Labels': {}, 'metadata': {}} - if self.PreProcessor._pp.returnFormat('output') not in ['PointSet']: + if not set(self.PreProcessor._pp.validDataType).issubset(set(['PointSet'])): self.raiseAnError(IOError, 'DataMining PP: this PP is employing a pre-processor PP which does not generates a PointSet.') - tempData = self.PreProcessor._pp.inputToInternal([currentInput]) + tempData = self.PreProcessor._pp.createPostProcessorInput([currentInput]) preProcessedData = self.PreProcessor._pp.run(tempData) if self.initializationOptionDict['KDD']['Features'] == 'input': featureList = currentInput.getVars('input') elif self.initializationOptionDict['KDD']['Features'] == 'output': dataList = preProcessedData['data'].keys() - # FIXME: this fix is due to the changes in the data structure of interface pp + # FIXME: this fix is due to the changes in the data structure of © pp toRemove = currentInput.getVars('input') + currentInput.getVars('meta') featureList = [elem for elem in dataList if elem not in toRemove] else: @@ -387,7 +387,7 @@ def 
initialize(self, runInfo, inputs, initDict): self.solutionExport = initDict["SolutionExport"] if "PreProcessor" in self.assemblerDict: self.PreProcessor = self.assemblerDict['PreProcessor'][0][3] - if not '_inverse' in dir(self.PreProcessor._pp.postProcessor): + if not '_inverse' in dir(self.PreProcessor._pp): self.raiseAnError(IOError, 'PostProcessor ' + self.name + ' is using a pre-processor where the method inverse has not implemented') if 'Metric' in self.assemblerDict: self.metric = self.assemblerDict['Metric'][0][3] @@ -620,7 +620,7 @@ def __runSciKitLearn(self, Input): rlzDims = {} for index,center in zip(indices,centers): tempDict[index] = center - centers = self.PreProcessor._pp.postProcessor._inverse(tempDict) + centers = self.PreProcessor._pp._inverse(tempDict) rlzs[self.labelFeature] = np.atleast_1d(indices) rlzDims[self.labelFeature] = [] if self.solutionExport.type == 'PointSet': diff --git a/plugins/HERON b/plugins/HERON index 8c8be8139b..1257af23da 160000 --- a/plugins/HERON +++ b/plugins/HERON @@ -1 +1 @@ -Subproject commit 8c8be8139b52e8bf395508b0097b137369d32df4 +Subproject commit 1257af23da7908c86172d08b9c409d85d0c1af5d diff --git a/plugins/SR2ML b/plugins/SR2ML index ad0c534bed..8d966ef9ed 160000 --- a/plugins/SR2ML +++ b/plugins/SR2ML @@ -1 +1 @@ -Subproject commit ad0c534bed90d1dcc1180de65879d0994f7f012e +Subproject commit 8d966ef9edc0c1244bf26b8a82e2d56352c21e9b diff --git a/plugins/TEAL b/plugins/TEAL index d3e08d213e..8ae86c461c 160000 --- a/plugins/TEAL +++ b/plugins/TEAL @@ -1 +1 @@ -Subproject commit d3e08d213ecea6b2f9738b746da224d36fbe41c8 +Subproject commit 8ae86c461c4bffbf9dd44fac531309c049642e8e diff --git a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml index e1f47f6b88..c9600eb36c 100644 --- a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml +++ b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml @@ -116,21 +116,18 @@ True - - HS2PS + time sigma,rho,beta,x,y,z,time,x0,y0,z0 - - dataObjectLabelFilter + HistorySet 0 - - dataObjectLabelFilter + HistorySet 1 diff --git a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_MeanShift.xml b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_MeanShift.xml index c4eca18aa6..4c2f385d40 100644 --- a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_MeanShift.xml +++ b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_MeanShift.xml @@ -105,22 +105,17 @@ 20 - - HS2PS + time sigma,rho,beta,x,y,z,time,x0,y0,z0 - - dataObjectLabelFilter - HistorySet + 0 - - dataObjectLabelFilter - HistorySet + 1 From 8e8069072047b869193c326baf425fc03e99ceda Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 21:07:52 -0600 Subject: [PATCH 40/51] revert changes in HS2PS --- framework/Models/PostProcessors/HS2PS.py | 107 ++++++++++++++++++----- 1 file changed, 85 insertions(+), 22 deletions(-) diff --git a/framework/Models/PostProcessors/HS2PS.py b/framework/Models/PostProcessors/HS2PS.py index 3e85598a5b..c3a6827308 100644 --- a/framework/Models/PostProcessors/HS2PS.py +++ b/framework/Models/PostProcessors/HS2PS.py @@ -63,7 +63,9 @@ def __init__(self): self.pivotParameter = None self.features = 'all' self.transformationSettings = {} - self.setInputDataType('xrDataset') + ## dataset option + # 
self.setInputDataType('xrDataset')
+    self.setInputDataType('dict')
     self.keepInputMeta(True)

@@ -98,30 +100,91 @@ def _handleInput(self, paramInput):
   def run(self,inputIn):
     """
       This method performs the actual transformation of the data object from history set to point set
-      @ In, inputIn, dict, dictionary of data.
-        inputIn = {'Data':listData, 'Files':listOfFiles},
-        listData has the following format: (listOfInputVars, listOfOutVars, xr.Dataset)
-      @ Out, outDataset, xarray.Dataset, output dataset
+      @ In, inputIn, dict, dictionary that contains the data inside the input DataObjects
+        inputIn = {'Data':listData, 'Files':listOfFiles},
+        listData has the following format: (listOfInputVars, listOfOutVars, DataDict) where
+        DataDict is a dictionary that has the format
+          dataDict['dims'] = dict {varName:independentDimensions}
+          dataDict['metadata'] = dict {metaVarName:metaVarValue}
+          dataDict['type'] = str TypeOfDataObject
+          dataDict['inpVars'] = list of input variables
+          dataDict['outVars'] = list of output variables
+          dataDict['numberRealizations'] = int SizeOfDataObject
+          dataDict['name'] = str DataObjectName
+          dataDict['metaKeys'] = list of meta variables
+          dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)}
+      @ Out, outputDic, dict, the output dictionary containing the converted PointSet data
     """
+    #### convert to use Dataset: the following has been tested. However, these changes
+    #### will cause the failure of DataMining since interfacePP is used by DataMining
+    #### for pre-processing data. When we convert all interfacePP to use Dataset, we can
+    #### use the following code, and eventually remove the dict option
+    # @ In, inputIn, dict, dictionary of data.
+ # inputIn = {'Data':listData, 'Files':listOfFiles}, + # listData has the following format: (listOfInputVars, listOfOutVars, xr.Dataset) + # @ Out, outDataset, xarray.Dataset, output dataset + # inpVars, outVars, data = inputIn['Data'][0] + # if self.features == 'all': + # self.features = outVars + # outDataset = data.drop_dims(self.pivotParameter) + # featDataset = data[self.features] + # if featDataset[self.features[-1]].isnull().sum() > 0: + # self.raiseAnError(IOError, 'Found misalignment in provided DataObject!') + # numRlz = data.dims['RAVEN_sample_ID'] + # featData = featDataset.to_array().values.transpose(1, 0, 2).reshape(numRlz, -1) + # varNames = [str(i) for i in range(featData.shape[-1])] + # convertedFeat = xr.DataArray(featData, dims=('RAVEN_sample_ID', 'outVars'), coords={'RAVEN_sample_ID':data['RAVEN_sample_ID'], 'outVars':varNames}) + # convertedFeatDataset = convertedFeat.to_dataset(dim='outVars') + # outDataset = xr.merge([outDataset, convertedFeatDataset]) + # ## self.transformationSettings is used by _inverse method when doing DataMining + # self.transformationSettings['vars'] = copy.deepcopy(self.features) + # self.transformationSettings['timeLength'] = data[self.pivotParameter].size + # self.transformationSettings['timeAxis'] = data[self.pivotParameter][0] + # self.transformationSettings['dimID'] = list(outDataset.keys()) + # return outDataset + ######### + + + inpVars, outVars, inputDict = inputIn['Data'][0] + outputDic = {'data': {}} + outputDic['dims'] = {} + numSamples = inputDict['numberRealizations'] + + # generate the input part of the output dictionary + for inputVar in inputDict['inpVars']: + outputDic['data'][inputVar] = inputDict['data'][inputVar] + + # generate the output part of the output dictionary if self.features == 'all': - self.features = outVars - outDataset = data.drop_dims(self.pivotParameter) - featDataset = data[self.features] - if featDataset[self.features[-1]].isnull().sum() > 0: - self.raiseAnError(IOError, 'Found misalignment in provided DataObject!') - numRlz = data.dims['RAVEN_sample_ID'] - featData = featDataset.to_array().values.transpose(1, 0, 2).reshape(numRlz, -1) - varNames = [str(i) for i in range(featData.shape[-1])] - convertedFeat = xr.DataArray(featData, dims=('RAVEN_sample_ID', 'outVars'), coords={'RAVEN_sample_ID':data['RAVEN_sample_ID'], 'outVars':varNames}) - convertedFeatDataset = convertedFeat.to_dataset(dim='outVars') - outDataset = xr.merge([outDataset, convertedFeatDataset]) - ## self.transformationSettings is used by _inverse method when doing DataMining + self.features = inputDict['outVars'] + + historyLength = len(inputDict['data'][self.features[0]][0]) + numVariables = historyLength*len(self.features) + for history in inputDict['data'][self.features[0]]: + if len(history) != historyLength: + self.raiseAnError(IOError, 'HS2PS Interfaced Post-Processor ' + str(self.name) + ' : one or more histories in the historySet have different time scale') + + tempDict = {} + matrix = np.zeros((numSamples,numVariables)) + for i in range(numSamples): + temp = np.empty(0) + for feature in self.features: + temp=np.append(temp,inputDict['data'][feature][i]) + matrix[i,:]=temp + + for key in range(numVariables): + outputDic['data'][str(key)] = np.empty(0) + outputDic['data'][str(key)] = matrix[:,key] + outputDic['dims'][str(key)] = [] + # add meta variables back + for key in inputDict['metaKeys']: + outputDic['data'][key] = inputDict['data'][key] + self.transformationSettings['vars'] = copy.deepcopy(self.features) - 
self.transformationSettings['timeLength'] = data[self.pivotParameter].size - self.transformationSettings['timeAxis'] = data[self.pivotParameter][0] - self.transformationSettings['dimID'] = list(outDataset.keys()) - return outDataset + self.transformationSettings['timeLength'] = historyLength + self.transformationSettings['timeAxis'] = inputDict['data'][self.pivotParameter][0] + self.transformationSettings['dimID'] = outputDic['data'].keys() + return outputDic def _inverse(self,inputDic): """ From 965cb64c09fc873479151018cf7fa9a9d686b777 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Thu, 6 May 2021 21:20:41 -0600 Subject: [PATCH 41/51] update HistorySetDelay to use PostProcessorPluginBase --- framework/Models/PostProcessors/Factory.py | 4 +-- .../{ => PostProcessorFunctions}/HS2PS.py | 0 .../HistorySetDelay.py | 31 ++++++++++++++----- 3 files changed, 25 insertions(+), 10 deletions(-) rename framework/Models/PostProcessors/{ => PostProcessorFunctions}/HS2PS.py (100%) rename framework/Models/PostProcessors/{ => PostProcessorFunctions}/HistorySetDelay.py (78%) diff --git a/framework/Models/PostProcessors/Factory.py b/framework/Models/PostProcessors/Factory.py index ed5b79ac6b..c2a2cbbf72 100644 --- a/framework/Models/PostProcessors/Factory.py +++ b/framework/Models/PostProcessors/Factory.py @@ -40,10 +40,10 @@ from .EconomicRatio import EconomicRatio from .RiskMeasuresDiscrete import RiskMeasuresDiscrete from .Validation import Validation -from .HistorySetDelay import HistorySetDelay -from .HS2PS import HS2PS ### PostProcessorFunctions (orig: InterfacedPostProcessor) +from .PostProcessorFunctions.HistorySetDelay import HistorySetDelay +from .PostProcessorFunctions.HS2PS import HS2PS from .PostProcessorFunctions.HStoPSOperator import HStoPSOperator from .PostProcessorFunctions.HistorySetSampling import HistorySetSampling from .PostProcessorFunctions.HistorySetSnapShot import HistorySetSnapShot diff --git a/framework/Models/PostProcessors/HS2PS.py b/framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py similarity index 100% rename from framework/Models/PostProcessors/HS2PS.py rename to framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py diff --git a/framework/Models/PostProcessors/HistorySetDelay.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py similarity index 78% rename from framework/Models/PostProcessors/HistorySetDelay.py rename to framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py index d0c4addab6..d97d8bda44 100644 --- a/framework/Models/PostProcessors/HistorySetDelay.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py @@ -24,9 +24,9 @@ import xarray as xr from utils import InputData, InputTypes -from .PostProcessorInterface import PostProcessorInterface +from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase -class HistorySetDelay(PostProcessorInterface): +class HistorySetDelay(PostProcessorPluginBase): """ Class to get lagged or delayed data out of a history set. 
""" @@ -69,6 +69,8 @@ def __init__(self): self.validDataType = ['HistorySet'] #only available output is HistorySet self.outputMultipleRealizations = True #this PP will return a full set of realization self.printTag = 'PostProcessor HistorySetDelay' + self.setInputDataType('xrDataset') + self.keepInputMeta(True) def _handleInput(self, paramInput): """ @@ -83,16 +85,29 @@ def _handleInput(self, paramInput): child.parameterValues['steps'], child.parameterValues['default'])) - def run(self,inputDic): + def initialize(self, runInfo, inputs, initDict=None): + """ + Method to initialize the DataClassifier post-processor. + @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc) + @ In, inputs, list, list of inputs + @ In, initDict, dict, optional, dictionary with initialization options + @ Out, None + """ + super().initialize(runInfo, inputs, initDict) + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject') + if inputs[0].type != 'HistorySet': + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type)) + + def run(self,inputIn): """ Method to post-process the dataObjects - @ In, inputDic, list, list of DataObjects + @ In, inputIn, dict, dictionary of data. + inputIn = {'Data':listData, 'Files':listOfFiles}, + listData has the following format: (listOfInputVars, listOfOutVars, xr.Dataset) @ Out, data, xarray.DataSet, output dataset """ - if len(inputDic)>1: - self.raiseAnError(IOError, 'HistorySetDelay Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject') - - data = inputDic[0].asDataset() + inpVars, outVars, data = inputIn['Data'][0] for delay in self.delays: original, new, steps, default = delay coords = {key: data[original][key] for key in data[original].dims} From 949117b52bed4cabc1bde6719295b1ed5db7b2c2 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 7 May 2021 10:36:33 -0600 Subject: [PATCH 42/51] convert failed tests --- .../ErrorChecks/unsyncedTypicalHistory.xml | 132 +++++++++--------- .../BasicStatistics/time_dep_asyncHists.xml | 5 +- .../Clustering/test_TD_KMeans_Filtered.xml | 2 - .../Clustering/test_TD_agglomerative_dtw.xml | 9 +- .../test_TD_agglomerative_euclidean.xml | 9 +- 5 files changed, 74 insertions(+), 83 deletions(-) diff --git a/tests/framework/ErrorChecks/unsyncedTypicalHistory.xml b/tests/framework/ErrorChecks/unsyncedTypicalHistory.xml index e9cbedab22..4f020e8978 100644 --- a/tests/framework/ErrorChecks/unsyncedTypicalHistory.xml +++ b/tests/framework/ErrorChecks/unsyncedTypicalHistory.xml @@ -1,66 +1,66 @@ - - - - - framework/ErrorChecks.unsyncedTypicalHistory - maljovec - 2017-07-21 - Models.PostProcessors.InterfacePostProcessor.TypicalHistoryFromHistorySet - - This test is aimed to check the functionality of RAVEN to error out in case some input - inconsistencies are found. - In this case, the HistorySet passed into the PostProcessor TypicalHistoryFromHistorySet - is not syncronized and the code must error out. - - - Adding this test description. 
- - - - - badData - readData,prepData - - - - unsynchronizedHistorySet.csv - - - - - TypicalHistoryFromHistorySet - 3600 - 32400 - seconds - - - - - - index,scaling - number - - seconds - - - - index,scaling - number - - seconds - - - - - - - rawDataFile - data - - - data - TypicalHistory - typicalData - - - + + + + framework/ErrorChecks.unsyncedTypicalHistory + maljovec + 2017-07-21 + Models.PostProcessors.InterfacePostProcessor.TypicalHistoryFromHistorySet + + This test is aimed to check the functionality of RAVEN to error out in case some input + inconsistencies are found. + In this case, the HistorySet passed into the PostProcessor TypicalHistoryFromHistorySet + is not syncronized and the code must error out. + + + Adding this test description. + Convert InterfacedPostProcessor: subType will be replaced with the text from method node, and method node will be removed + + + + + badData + readData,prepData + + + + unsynchronizedHistorySet.csv + + + + + 3600 + 32400 + seconds + + + + + + index,scaling + number + + seconds + + + + index,scaling + number + + seconds + + + + + + + rawDataFile + data + + + data + TypicalHistory + typicalData + + + + diff --git a/tests/framework/PostProcessors/BasicStatistics/time_dep_asyncHists.xml b/tests/framework/PostProcessors/BasicStatistics/time_dep_asyncHists.xml index da15bac2fd..d96e1951a7 100644 --- a/tests/framework/PostProcessors/BasicStatistics/time_dep_asyncHists.xml +++ b/tests/framework/PostProcessors/BasicStatistics/time_dep_asyncHists.xml @@ -21,7 +21,7 @@ In PR #882, the standard errors on statistics moments are implemented, these quantities are stored in the pointwise metadata, and will be printed out in the output csv files by default. In order to avoid regolding this test, the optional node 'what' with text values 'input, output' is added. - In this case, only the user requested variables as specified in the DataObjects are printed out. + In this case, only the user requested variables as specified in the DataObjects are printed out. @@ -70,8 +70,7 @@ x0,y0,z0,x,y,z x0,y0,z0,x,y,z - - HistorySetSync + all time extended diff --git a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml index c9600eb36c..24aaa0e044 100644 --- a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml +++ b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_KMeans_Filtered.xml @@ -123,12 +123,10 @@ sigma,rho,beta,x,y,z,time,x0,y0,z0 - HistorySet 0 - HistorySet 1 diff --git a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_dtw.xml b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_dtw.xml index 171d2e9e5b..1b4325af0d 100644 --- a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_dtw.xml +++ b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_dtw.xml @@ -13,6 +13,7 @@ As reported in issue #805, RAVEN will not allow input DataObject of PostProcessor to be output DataObject, the output data object will be renamed, and the variable name of labels will be explicitly defined. 
+ Convert InterfacedPostProcessor: subType will be replaced with the text from method node, and method node will be removed @@ -66,15 +67,11 @@ ward - - dataObjectLabelFilter - HistorySet + 0 - - dataObjectLabelFilter - HistorySet + 1 diff --git a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_euclidean.xml b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_euclidean.xml index 378ad94df5..8efdb8a3f4 100644 --- a/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_euclidean.xml +++ b/tests/framework/PostProcessors/DataMiningPostProcessor/Clustering/test_TD_agglomerative_euclidean.xml @@ -14,6 +14,7 @@ As reported in issue #805, RAVEN will not allow input DataObject of PostProcessor to be output DataObject, the output data object will be renamed, and the variable name of labels will be explicitly defined. + Convert InterfacedPostProcessor: subType will be replaced with the text from method node, and method node will be removed @@ -36,15 +37,11 @@ ward - - dataObjectLabelFilter - HistorySet + 0 - - dataObjectLabelFilter - HistorySet + 1 From 2db39f637fae8bb1b79597741a74931e473d3e41 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 7 May 2021 15:27:25 -0600 Subject: [PATCH 43/51] update test --- doc/workshop/forwardSampling/exercises/8_soln.xml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/workshop/forwardSampling/exercises/8_soln.xml b/doc/workshop/forwardSampling/exercises/8_soln.xml index 8f03f7a94c..d3fa0173ab 100755 --- a/doc/workshop/forwardSampling/exercises/8_soln.xml +++ b/doc/workshop/forwardSampling/exercises/8_soln.xml @@ -28,8 +28,7 @@ v0,y0,angle,x,y,timeOption,t - - HistorySetSync + t zeroed grid From 43dd641b2a85ae158e67210310e036c266245cd1 Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 7 May 2021 15:29:40 -0600 Subject: [PATCH 44/51] add conversion script for PR 1533 --- scripts/conversionScripts/interface_pp.py | 64 +++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 scripts/conversionScripts/interface_pp.py diff --git a/scripts/conversionScripts/interface_pp.py b/scripts/conversionScripts/interface_pp.py new file mode 100644 index 0000000000..55ee633801 --- /dev/null +++ b/scripts/conversionScripts/interface_pp.py @@ -0,0 +1,64 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
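+"""
+  Conversion script for the removal of the InterfacedPostProcessor (merge request #1533):
+  the subType of a <PostProcessor> node is replaced with the text of its <method> child,
+  and the <method> (and any <dataType>) node is dropped.
+
+  Illustrative sketch of the transformation (the PostProcessor name below is a
+  hypothetical placeholder, not taken from any particular test):
+
+    before:  <PostProcessor name="syncPP" subType="InterfacedPostProcessor">
+               <method>HistorySetSync</method>
+               ...
+             </PostProcessor>
+    after:   <PostProcessor name="syncPP" subType="HistorySetSync">
+               ...
+             </PostProcessor>
+"""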
+import xml.etree.ElementTree as ET
+import xml.dom.minidom as pxml
+import os
+
+def convert(tree,fileName=None):
+  """
+    Converts input files to be compatible with merge request #1533.
+    The InterfacedPostProcessor has been removed, and the subType of a given
+    PostProcessor is replaced with the text of its method node.
+    @ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file
+    @ In, fileName, the name for the RAVEN input file
+    @ Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file
+  """
+  simulation = tree.getroot()
+  models = simulation.find('Models')
+  updateTestInfo = False
+  if models is not None:
+    postProcessors = models.findall('PostProcessor')
+    for pp in postProcessors:
+      subType = pp.get('subType')
+      if subType == 'InterfacedPostProcessor':
+        method = pp.find('method')
+        pp.set('subType', method.text.strip())
+        pp.remove(method)
+        updateTestInfo = True
+        dataType = pp.find('dataType')
+        if dataType is not None:
+          pp.remove(dataType)
+
+  if updateTestInfo:
+    TestInfo = simulation.find('TestInfo')
+    if TestInfo is not None:
+      revisions = TestInfo.find('revisions')
+      hasRev = True
+      if revisions is None:
+        revisions = ET.Element('revisions')
+        hasRev = False
+      rev = ET.Element('revision')
+      rev.attrib['author'] = 'wangc'
+      rev.attrib['date'] = '2021-05-07'
+      rev.text = 'Convert InterfacedPostProcessor: subType will be replaced with the text from method node, and method node will be removed'
+      revisions.append(rev)
+      if not hasRev:
+        TestInfo.append(revisions)
+
+  return tree
+
+if __name__=='__main__':
+  import convert_utils
+  import sys
+  convert_utils.standardMain(sys.argv,convert)

From b3232fe1a3d355a0b3843336116a6580ffcb1eb3 Mon Sep 17 00:00:00 2001
From: "Wang, Congjian" 
Date: Fri, 7 May 2021 15:32:13 -0600
Subject: [PATCH 45/51] remove RavenOut

---
 .../PostProcessors/RavenOutput.tex            | 161 ------------------
 doc/user_manual/postprocessor.tex             |   9 -
 2 files changed, 170 deletions(-)
 delete mode 100644 doc/user_manual/PostProcessors/RavenOutput.tex

diff --git a/doc/user_manual/PostProcessors/RavenOutput.tex b/doc/user_manual/PostProcessors/RavenOutput.tex
deleted file mode 100644
index 2eb805304e..0000000000
--- a/doc/user_manual/PostProcessors/RavenOutput.tex
+++ /dev/null
@@ -1,161 +0,0 @@
-\subsubsection{RavenOutput}
-\label{RavenOutput}
-The \textbf{RavenOutput} post-processor is specifically used
-to gather data from RAVEN output files and generate a PointSet suitable for plotting or other analysis.
-It can do this in two modes: static and dynamic. In static mode, the
-PostProcessor reads from from several static XML output files produced by RAVEN. In dynamic mode, the PostProcessor
-reads from a single dynamic XML output file and builds a PointSet where the pivot parameter (e.g. time) is the
-input and the requested values are returned for each of the pivot parameter values (e.g. points in time). The
-name for the pivot parameter will be taken directly from the XML structure.
-%
-Note: by default the PostProcessor operates in static mode; to read a dynamic file, the \xmlNode{dynamic} node must
-be specified.
-%
-\ppType{RavenOutput}{RavenOutput}
-%
-\begin{itemize}
-  \item \xmlNode{dynamic}, \xmlDesc{string, optional field}, if included will trigger reading a single dynamic
-  file instead of multiple static files, unless the text of this field is \xmlString{false}, in which case it
-  will return to the default (multiple static files).
\default(False) - \item \xmlNode{File}, \xmlDesc{XML Node, required field} - % - For each file to be read by this postprocessor, an entry in the \xmlNode{Files} node must be added, and a - \xmlNode{File} node must be added to the postprocessor input block. The \xmlNode{File} requires two - identifying attributes: - \begin{itemize} - \item \xmlAttr{name}, \xmlDesc{string, required field}, the RAVEN-assigned name of the file, - \item \xmlAttr{ID}, \xmlDesc{float, optional field}, the floating point ID that will be unique to this - file. This will appear as an entry in the output \xmlNode{DataObject} and the corresponding column are - the values extracted from this file. If not specified, RAVEN will attempt to find a suitable integer ID - to use, and a warning will be raised. - - When defining the \xmlNode{DataObject} that this postprocessor will write to, and when using the static - (non-\xmlNode{dynamic}) form of the postprocessor, the \xmlNode{input} space should be given as - \xmlString{ID}, and the output variables should be the outputs specified in the postprocessor. See the - examples below. In the data object, the variable values will be keyed on the \xmlString{ID} parameter. - \end{itemize} - Each value that needs to be extracted from the file needs to be specified by one of the following - \xmlNode{output} nodes within the \xmlNode{File} node: - \begin{itemize} - \item \xmlNode{output}, \xmlDesc{|-separated string, required field}, - the specification of the output to extract from the file. - RAVEN uses \texttt{xpath} as implemented in Python's \texttt{xml.etree} module to specify locations - in XML. For example, to search tags, use a path - separated by forward slash characters (``/''), starting under the root; this means the root node should not - be included in the path. See the example. For more details on xpath options available, see - \url{https://docs.python.org/2/library/xml.etree.elementtree.html#xpath-support}. - % - The \xmlNode{output} node requires the following attribute: - \begin{itemize} - \item \xmlAttr{name}, \xmlDesc{string, required field}, specifies the entry in the Data Object that - this value should be stored under. - \end{itemize} - - \end{itemize} - % -\end{itemize} -\textbf{Example (Static):} -Using an example, let us have two input files, named \emph{in1.xml} and \emph{in2.xml}. They appear as -follows. Note that the name of the variables we want changes slightly between the XML; this is fine. - -\textbf{\emph{in1.xml}} -\begin{lstlisting}[style=XML] - - - 6 - 7 - - -\end{lstlisting} -\textbf{\emph{in2.xml}} -\begin{lstlisting}[style=XML] - - - 6.1 - 7.1 - - -\end{lstlisting} - -The RAVEN input to extract this information would appear as follows. -We include an example of defining the \xmlNode{DataObject} that this postprocessor will write out to, for -further clarity. - -\begin{lstlisting}[style=XML] - - ... - - inp1.xml - inp2.xml - - ... - - ... - - - ans/val1 - ans/val2 - - - ans/first - ans/second - - - ... - - ... - - ... - - ID - first,second - - ... - - ... - -\end{lstlisting} - -\textbf{Example (Dynamic):} -For a dynamic example, consider this time-evolution of values example. \emph{inFile.xml} is a RAVEN dynamic -XML output. - -\textbf{\emph{in1.xml}} -\begin{lstlisting}[style=XML] - - -\end{lstlisting} -The RAVEN input to extract this information would appear as follows: -\begin{lstlisting}[style=XML] - - ... - - inFile.xml - - ... - - ... - - true - - ans|val1 - - - ... - - ... 
-
-\end{lstlisting}
-The RAVEN input to extract this information would appear as follows:
-\begin{lstlisting}[style=XML]
-
-  ...
-
-    inFile.xml
-
-  ...
-
-  ...
-
-      true
-
-        ans|val1
-
-
-  ...
-
-  ...
-
-
-\end{lstlisting}
-The resulting PointSet has \emph{time} as an input and \emph{first} as an output.

diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex
index ab4dc89396..6f523fdc21 100644
--- a/doc/user_manual/postprocessor.tex
+++ b/doc/user_manual/postprocessor.tex
@@ -1106,15 +1106,6 @@ \subsubsection{TopologicalDecomposition}
 %%%%% PP External %%%%%%%
 \input{PostProcessors/InterfacedPostProcessors.tex}
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%% RavenOutput PP %%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% FIXME: I think this is not valid anymore. We do not support the RavenOutput PP anymore.
-% In addition, all the related tests are disabled. It seems to me we should remove all
-% related files and tests (wangc - 4/23/2021)
-%\input{PostProcessors/RavenOutput.tex}
-
 %%%%%%%%%%%%%% ParetoFrontier PP %%%%%%%%%%%%%%%%%%%

From 89c638a3847a1208b9edbf27da3e5c56944ad3fc Mon Sep 17 00:00:00 2001
From: "Wang, Congjian" 
Date: Fri, 7 May 2021 16:11:40 -0600
Subject: [PATCH 46/51] update manual for interface postprocessors

---
 doc/user_manual/PostProcessors/HS2PS.tex      |  16 +
 .../PostProcessors/HStoPSOperator.tex         | 103 ++++++
 .../PostProcessors/HistorySetSampling.tex     |  35 ++
 .../PostProcessors/HistorySetSnapShot.tex     |  94 +++++
 .../PostProcessors/HistorySetSync.tex         |  32 ++
 .../InterfacedPostProcessors.tex              | 325 ------------------
 .../TypicalHistoryFromHistorySet.tex          |  35 ++
 .../PostProcessors/dataObjectLabelFilter.tex  |  19 +
 doc/user_manual/postprocessor.tex             |  53 +--
 9 files changed, 366 insertions(+), 346 deletions(-)
 create mode 100644 doc/user_manual/PostProcessors/HS2PS.tex
 create mode 100644 doc/user_manual/PostProcessors/HStoPSOperator.tex
 create mode 100644 doc/user_manual/PostProcessors/HistorySetSampling.tex
 create mode 100644 doc/user_manual/PostProcessors/HistorySetSnapShot.tex
 create mode 100644 doc/user_manual/PostProcessors/HistorySetSync.tex
 create mode 100644 doc/user_manual/PostProcessors/TypicalHistoryFromHistorySet.tex
 create mode 100644 doc/user_manual/PostProcessors/dataObjectLabelFilter.tex

diff --git a/doc/user_manual/PostProcessors/HS2PS.tex b/doc/user_manual/PostProcessors/HS2PS.tex
new file mode 100644
index 0000000000..699412a418
--- /dev/null
+++ b/doc/user_manual/PostProcessors/HS2PS.tex
@@ -0,0 +1,16 @@
+\subsubsection{HS2PS}
+\label{HS2PS}
+
+This Post-Processor performs a conversion from HistorySet to PointSet.
+The conversion is made so that each history $H$ is converted to a single point $P$.
+If each history $H$ is a dict of $n$ output variables $x_1=[\ldots],\ldots,x_n=[\ldots]$, then the resulting point $P$ is $P=concat(x_1,\ldots,x_n)$.
+For example, a history with two output variables $x=[1,2]$ and $y=[3,4]$ becomes the point $P=(1,2,3,4)$.
+Note: it is here assumed that all histories have been synchronized so that they have the same length, start point, and end point. If you are not sure, pre-process the original HistorySet first.
+
+\ppType{HS2PS}{HS2PS}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable
+\end{itemize}
diff --git a/doc/user_manual/PostProcessors/HStoPSOperator.tex b/doc/user_manual/PostProcessors/HStoPSOperator.tex
new file mode 100644
index 0000000000..d30f60a7c6
--- /dev/null
+++ b/doc/user_manual/PostProcessors/HStoPSOperator.tex
@@ -0,0 +1,103 @@
+\subsubsection{HStoPSOperator}
+\label{HStoPSOperator}
+
+This Post-Processor performs the conversion from HistorySet to PointSet by projecting the output space.
+\ppType{HStoPSOperator}{HStoPSOperator}
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are available:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable. Default is ``time''.
+  \nb Used just in case the \xmlNode{pivotValue}-based operation is requested
+  \item \xmlNode{operator}, \xmlDesc{string, optional field}, the operation to perform on the output space:
+  \begin{itemize}
+    \item \textbf{min}, compute the minimum of each variable along each single history
+    \item \textbf{max}, compute the maximum of each variable along each single history
+    \item \textbf{average}, compute the average of each variable along each single history
+    \item \textbf{all}, join together all of the values of each variable in
+    the history, and make the pivotParameter a regular
+    parameter. Unlike the min and max operators, this keeps
+    all the data, just organized differently. This operator
+    does this by propagating the other input parameters for
+    each item of the pivotParameter.
+    Table~\ref{operator_all_switch_before} shows an example
+    HistorySet with input parameter x, pivot parameter t, and
+    output parameter b, and then
+    Table~\ref{operator_all_switch_after} shows the resulting
+    PointSet with input parameters x and t, and output
+    parameter b. Note that which parameters are input and which
+    are output in the resulting PointSet depends on the
+    DataObject specification.
+  \end{itemize}
+  \nb This node can be inputted only if \xmlNode{pivotValue} and \xmlNode{row} are not present
+  \item \xmlNode{pivotValue}, \xmlDesc{float, optional field}, the value of the pivotParameter at which the other outputs need to be extracted.
+  \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{row} are not present
+  \item \xmlNode{pivotStrategy}, \xmlDesc{string, optional field}, the strategy to use for the pivotValue:
+  \begin{itemize}
+    \item \textbf{nearest}, find the value that is the nearest with respect to the \xmlNode{pivotValue}
+    \item \textbf{floor}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but less than the \xmlNode{pivotValue}
+    \item \textbf{ceiling}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but greater than the \xmlNode{pivotValue}
+    \item \textbf{interpolate}, if the exact \xmlNode{pivotValue} cannot be found, interpolate using a linear approach
+  \end{itemize}
+
+  \nb Valid just in case \xmlNode{pivotValue} is present
+  \item \xmlNode{row}, \xmlDesc{int, optional field}, the row index at which the outputs need to be extracted.
+  \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{pivotValue} are not present
+\end{itemize}
+
+The following example shows what the XML input block might look like:
+
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name="HStoPSoperatorRow" subType="HStoPSOperator">
+      <row>-1</row>
+    </PostProcessor>
+    <PostProcessor name="HStoPSoperatorPivotValue" subType="HStoPSOperator">
+      <pivotParameter>time</pivotParameter>
+      <pivotValue>0.3</pivotValue>
+    </PostProcessor>
+    <PostProcessor name="HStoPSoperatorMax" subType="HStoPSOperator">
+      <pivotParameter>time</pivotParameter>
+      <operator>max</operator>
+    </PostProcessor>
+    <PostProcessor name="HStoPSoperatorMin" subType="HStoPSOperator">
+      <pivotParameter>time</pivotParameter>
+      <operator>min</operator>
+    </PostProcessor>
+    <PostProcessor name="HStoPSoperatorAverage" subType="HStoPSOperator">
+      <pivotParameter>time</pivotParameter>
+      <operator>average</operator>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
+
+\begin{table}[!hbtp]
+  \caption{Starting HistorySet for operator all}
+  \label{operator_all_switch_before}
+\begin{tabular}{l|l|l}
+  x & t & b \\
+  \hline
+  5.0 & & \\
+  \hline
+  & 1.0 & 6.0 \\
+  \hline
+  & 2.0 & 7.0 \\
+\end{tabular}
+\end{table}
+
+\begin{table}[!hbtp]
+  \caption{Resulting PointSet after operator all}
+  \label{operator_all_switch_after}
+\begin{tabular}{l|l|l}
+  x & t & b \\
+  \hline
+  5.0 & 1.0 & 6.0 \\
+  \hline
+  5.0 & 2.0 & 7.0 \\
+\end{tabular}
+\end{table}
diff --git a/doc/user_manual/PostProcessors/HistorySetSampling.tex b/doc/user_manual/PostProcessors/HistorySetSampling.tex
new file mode 100644
index 0000000000..dcef2f965d
--- /dev/null
+++ b/doc/user_manual/PostProcessors/HistorySetSampling.tex
@@ -0,0 +1,35 @@
+\subsubsection{HistorySetSampling}
+\label{HistorySetSampling}
+
+This Post-Processor performs the conversion from HistorySet to HistorySet.
+The conversion is made so that each history H is re-sampled according to a
+specific sampling strategy.
+It can be used to reduce the amount of space required by the HistorySet.
+
+\ppType{HistorySetSampling}{HistorySetSampling}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+ \item \xmlNode{samplingType}, \xmlDesc{string, required field}, specifies the type of sampling method to be used:
+  \begin{itemize}
+   \item uniform: the set of \xmlNode{numberOfSamples} samples are uniformly distributed along the time axis
+   \item firstDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with
+   higher first order derivative
+   \item secondDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with
+   higher second order derivative
+   \item filteredFirstDerivative: samples are located where the first derivative is greater than the specified \xmlNode{tolerance} value
+   (hence, the number of samples can vary from history to history)
+   \item filteredSecondDerivative: samples are located where the second derivative is greater than the specified \xmlNode{tolerance} value
+   (hence, the number of samples can vary from history to history)
+  \end{itemize}
+ \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, number of samples (required only for the following sampling
+ types: uniform, firstDerivative, secondDerivative)
+ \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable
+ \item \xmlNode{interpolation}, \xmlDesc{string, optional field}, type of interpolation to be employed for the history reconstruction
+ (required only for the following sampling types: uniform, firstDerivative, secondDerivative).
+ Valid types of interpolation to be specified: linear, nearest, zero, slinear, quadratic, cubic, intervalAverage
+ \item \xmlNode{tolerance}, \xmlDesc{string, optional field}, tolerance level (required only for the following sampling types:
+ filteredFirstDerivative or filteredSecondDerivative)
+\end{itemize}
diff --git a/doc/user_manual/PostProcessors/HistorySetSnapShot.tex b/doc/user_manual/PostProcessors/HistorySetSnapShot.tex
new file mode 100644
index 0000000000..4b71153a58
--- /dev/null
+++ b/doc/user_manual/PostProcessors/HistorySetSnapShot.tex
@@ -0,0 +1,94 @@
+\subsubsection{HistorySetSnapShot}
+\label{HistorySetSnapShot}
+
+This Post-Processor performs a conversion from HistorySet to PointSet.
+The conversion is made so that each history $H$ is converted to a single point $P$.
+There are several methods that can be employed to choose the single point from the history:
+\begin{itemize}
+  \item min: Take a time slice when the \xmlNode{pivotVar} is at its smallest value,
+  \item max: Take a time slice when the \xmlNode{pivotVar} is at its largest value,
+  \item average: Take a time slice when the \xmlNode{pivotVar} is at its time-weighted average value,
+  \item value: Take a time slice when the \xmlNode{pivotVar} \emph{first passes} its specified value,
+  \item timeSlice: Take a time slice index from the sampled time instance space.
+\end{itemize}
+To demonstrate the timeSlice, assume that each history $H$ is a dict of $n$ output variables $x_1=[...],
+x_n=[...]$; then the resulting point $P$ at time instant index $t$ is $P=[x_1[t],...,x_n[t]]$.
+
+Choosing one of these methods for the \xmlNode{type} node will take a time slice for all the variables in the
+output space based on the provided parameters. Alternatively, a \xmlString{mixed} type can be used, in which
+each output variable can use a different time slice parameter. In other words, you can take the max of one
+variable while taking the minimum of another, etc.
+
+\ppType{HistorySetSnapShot}{HistorySetSnapShot}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{type}, \xmlDesc{string, required field}, type of operation: \xmlString{min}, \xmlString{max},
+  \xmlString{average}, \xmlString{value}, \xmlString{timeSlice}, or \xmlString{mixed}
+  \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended)
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, name of the temporal variable. Required for the
+  \xmlString{average} and \xmlString{timeSlice} methods.
+\end{itemize}
+
+If a \xmlString{timeSlice} type is in use, the following nodes are also required:
+\begin{itemize}
+  \item \xmlNode{timeInstant}, \xmlDesc{integer, required field}, required and only used in the
+  \xmlString{timeSlice} type.
Location of the time slice (integer index).
+  \item \xmlNode{numberOfSamples}, \xmlDesc{integer, required field}, number of samples
+\end{itemize}
+
+If instead a \xmlString{min}, \xmlString{max}, \xmlString{average}, or \xmlString{value} is used, the following nodes
+are also required:
+\begin{itemize}
+  \item \xmlNode{pivotVar}, \xmlDesc{string, required field}, name of the chosen indexing variable (the
+  variable whose min, max, average, or value is used to determine the time slice)
+  \item \xmlNode{pivotVal}, \xmlDesc{float, optional field}, required for the \xmlString{value} type, the value for the chosen variable
+\end{itemize}
+
+Lastly, if a \xmlString{mixed} approach is used, the following nodes apply:
+\begin{itemize}
+  \item \xmlNode{max}, \xmlDesc{string, optional field}, the names of variables whose output should be their
+  own maximum value within the history.
+  \item \xmlNode{min}, \xmlDesc{string, optional field}, the names of variables whose output should be their
+  own minimum value within the history.
+  \item \xmlNode{average}, \xmlDesc{string, optional field}, the names of variables whose output should be their
+  own average value within the history. Note that a \xmlNode{pivotParameter} node is required to perform averages.
+  \item \xmlNode{value}, \xmlDesc{string, optional field}, the names of variables whose output should be taken
+  at a time slice determined by another variable. As with the non-mixed \xmlString{value} type, the first
+  time the \xmlAttr{pivotVar} crosses the specified \xmlAttr{pivotVal} will be the time slice taken.
+  This node requires two attributes, if used:
+  \begin{itemize}
+    \item \xmlAttr{pivotVar}, \xmlDesc{string, required field}, the name of the variable on which the time
+    slice will be performed. That is, if we want the value of $y$ when $t=0.245$,
+    this attribute would be \xmlString{t}.
+    \item \xmlAttr{pivotVal}, \xmlDesc{float, required field}, the value of the \xmlAttr{pivotVar} on which the time
+    slice will be performed. That is, if we want the value of $y$ when $t=0.245$,
+    this attribute would be \xmlString{0.245}.
+  \end{itemize}
+  Note that all the outputs of the \xmlNode{DataObject} output of this postprocessor must be listed under one
+  of the \xmlString{mixed} node types in order for values to be returned.
+\end{itemize}
+
+\textbf{Example (mixed):}
+This example will output the average value of $x$ for $x$, the value of $y$ at
+time$=0.245$ for $y$, and the value of $z$ at $x=4.0$ for $z$.
+\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
+<Simulation>
+  ...
+  <Models>
+    ...
+    <PostProcessor name="snapShotMixed" subType="HistorySetSnapShot">
+      <type>mixed</type>
+      <average>x</average>
+      <value pivotVar="time" pivotVal="0.245">y</value>
+      <value pivotVar="x" pivotVal="4.0">z</value>
+      <pivotParameter>time</pivotParameter>
+      <extension>zeroed</extension>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
diff --git a/doc/user_manual/PostProcessors/HistorySetSync.tex b/doc/user_manual/PostProcessors/HistorySetSync.tex
new file mode 100644
index 0000000000..a2ec16042a
--- /dev/null
+++ b/doc/user_manual/PostProcessors/HistorySetSync.tex
@@ -0,0 +1,32 @@
+\subsubsection{HistorySetSync}
+\label{HistorySetSync}
+
+This Post-Processor performs the conversion from HistorySet to HistorySet.
+The conversion is made so that all histories are synchronized in time.
+It can be used to allow the histories to be sampled at the same time instant.
+
+There are several possible synchronization methods, specified through the \xmlNode{syncMethod} node. If the
+\xmlNode{syncMethod} is \xmlString{grid}, a \xmlNode{numberOfSamples} node is specified,
+which yields an equally-spaced grid of time points.
The output values for these points will be linearly interpolated
+from the nearest sampled time points, and the new HistorySet will contain only the new grid points.
+
+The other methods are used by specifying \xmlNode{syncMethod} as \xmlString{all}, \xmlString{min}, or
+\xmlString{max}. For \xmlString{all}, the postprocessor will iterate through the
+existing histories, collect all the time points used in any of them, and use these as the new grid on which to
+establish histories, retaining all the exact original values and interpolating linearly where necessary.
+In the event of \xmlString{min} or \xmlString{max}, the postprocessor will find the smallest or largest time
+history, respectively, and use those time values as nodes to interpolate between.
+
+\ppType{HistorySetSync}{HistorySetSync}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+ \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable
+ \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended)
+ \item \xmlNode{syncMethod}, \xmlDesc{string, required field}, synchronization strategy to employ (see
+ description above). Options are \xmlString{grid}, \xmlString{all}, \xmlString{max}, \xmlString{min}.
+ \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, required if \xmlNode{syncMethod} is
+ \xmlString{grid}, number of new time samples
+\end{itemize}
diff --git a/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex b/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
index 1756c45b71..915f0d9854 100644
--- a/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
+++ b/doc/user_manual/PostProcessors/InterfacedPostProcessors.tex
@@ -111,328 +111,3 @@ \subsubsection{Interfaced}
   inputDict['output']['data'][hist2] = {'time': array([ .1,.2,.3]), 'outputVar1':array([ .14,.15,.16])}
 \end{lstlisting}
-
-
-\paragraph{Method: HStoPSOperator}
-
-This Post-Processor performs the conversion from HistorySet to PointSet performing a projection of the output space.
-
-In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are available:
-
-\begin{itemize}
-  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable. Default is ``time''.
-  \nb Used just in case the \xmlNode{pivotValue}-based operation is requested
-  \item \xmlNode{operator}, \xmlDesc{string, optional field}, the operation to perform on the output space:
-  \begin{itemize}
-    \item \textbf{min}, compute the minimum of each variable along each single history
-    \item \textbf{max}, compute the maximum of each variable along each single history
-    \item \textbf{average}, compute the average of each variable along each single history
-    \item \textbf{all}, join together all of the each variable in
-    the history, and make the pivotParameter a regular
-    parameter. Unlike the min and max operators, this keeps
-    all the data, just organized differently. This operator
-    does this by propogating the other input parameters for
-    each item of the pivotParameter.
-    Table~\ref{operator_all_switch_before} shows an example
-    HistorySet with input parameter x, pivot parameter t, and
-    output parameter b and then
-    Table~\ref{operator_all_switch_after} shows the resulting
-    PointSet with input parameters x and t, and output
-    parameter b.
Note that which parameters are input and which
-    are output in the resulting PointSet depends on the
-    DataObject specification.
-  \end{itemize}
-  \nb This node can be inputted only if \xmlNode{pivotValue} and \xmlNode{row} are not present
-  \item \xmlNode{pivotValue}, \xmlDesc{float, optional field}, the value of the pivotParameter with respect to the other outputs need to be extracted.
-  \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{row} are not present
-  \item \xmlNode{pivotStrategy}, \xmlDesc{string, optional field}, The strategy to use for the pivotValue:
-  \begin{itemize}
-    \item \textbf{nearest}, find the value that is the nearest with respect the \xmlNode{pivotValue}
-    \item \textbf{floor}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but less then the \xmlNode{pivotValue}
-    \item \textbf{celing}, find the value that is the nearest with respect to the \xmlNode{pivotValue} but greater then the \xmlNode{pivotValue}
-    \item \textbf{interpolate}, if the exact \xmlNode{pivotValue} can not be found, interpolate using a linear approach
-  \end{itemize}
-
-  \nb Valid just in case \xmlNode{pivotValue} is present
-  \item \xmlNode{row}, \xmlDesc{int, optional field}, the row index at which the outputs need to be extracted.
-  \nb This node can be inputted only if \xmlNode{operator} and \xmlNode{pivotValue} are not present
-\end{itemize}
-
-This example will show how the XML input block would look like:
-
-\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
-<Simulation>
-  ...
-  <Models>
-    ...
-    <PostProcessor name="HStoPSoperatorRow" subType="InterfacedPostProcessor">
-      <method>HStoPSOperator</method>
-      <row>-1</row>
-    </PostProcessor>
-    <PostProcessor name="HStoPSoperatorPivotValue" subType="InterfacedPostProcessor">
-      <method>HStoPSOperator</method>
-      <pivotParameter>time</pivotParameter>
-      <pivotValue>0.3</pivotValue>
-    </PostProcessor>
-    <PostProcessor name="HStoPSoperatorMax" subType="InterfacedPostProcessor">
-      <method>HStoPSOperator</method>
-      <pivotParameter>time</pivotParameter>
-      <operator>max</operator>
-    </PostProcessor>
-    <PostProcessor name="HStoPSoperatorMin" subType="InterfacedPostProcessor">
-      <method>HStoPSOperator</method>
-      <pivotParameter>time</pivotParameter>
-      <operator>min</operator>
-    </PostProcessor>
-    <PostProcessor name="HStoPSoperatorAverage" subType="InterfacedPostProcessor">
-      <method>HStoPSOperator</method>
-      <pivotParameter>time</pivotParameter>
-      <operator>average</operator>
-    </PostProcessor>
-    ...
-  </Models>
-  ...
-</Simulation>
-\end{lstlisting}
-
-\begin{table}[!hbtp]
-  \caption{Starting HistorySet for operator all}
-  \label{operator_all_switch_before}
-\begin{tabular}{l|l|l}
-  x & t & b \\
-  \hline
-  5.0 & & \\
-  \hline
-  & 1.0 & 6.0 \\
-  \hline
-  & 2.0 & 7.0 \\
-\end{tabular}
-\end{table}
-
-\begin{table}[!hbtp]
-  \caption{Resulting PointSet after operator all}
-  \label{operator_all_switch_after}
-\begin{tabular}{l|l|l}
-  x & t & b \\
-  \hline
-  5.0 & 1.0 & 6.0 \\
-  \hline
-  5.0 & 2.0 & 7.0 \\
-\end{tabular}
-\end{table}
-
-\paragraph{Method: HistorySetSampling}
-This Post-Processor performs the conversion from HistorySet to HistorySet
-The conversion is made so that each history H is re-sampled accordingly to a
-specific sampling strategy.
-It can be used to reduce the amount of space required by the HistorySet.
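As an illustration of the re-sampling idea, here is a minimal Python sketch; the function name and data layout are assumptions for exposition, not the RAVEN implementation. The \xmlString{uniform} strategy amounts to placing \xmlNode{numberOfSamples} equally spaced pivot values and rebuilding each variable on that grid, for example by linear interpolation:

\begin{lstlisting}[language=python]
# Illustrative sketch of the 'uniform' samplingType (not RAVEN code):
# re-sample one history onto an equally spaced time grid.
import numpy as np

def uniformResample(time, values, numberOfSamples):
  # equally spaced grid between the first and last time points
  newTime = np.linspace(time[0], time[-1], numberOfSamples)
  # linear interpolation shown here; the manual lists other choices
  return newTime, np.interp(newTime, time, values)

t = np.array([0.0, 0.1, 0.5, 2.0])
x = np.array([1.0, 2.0, 0.5, 3.0])
print(uniformResample(t, x, 5))
\end{lstlisting}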
- -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{samplingType}, \xmlDesc{string, required field}, specifies the type of sampling method to be used: - \begin{itemize} - \item uniform: the set of \xmlNode{numberOfSamples} samples are uniformly distributed along the time axis - \item firstDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with - higher first order derivative - \item secondDerivative: the set of \xmlNode{numberOfSamples} samples are distributed along the time axis in regions with - higher second order derivative - \item filteredFirstDerivative: samples are located where the first derivative is greater than the specified \xmlNode{tolerance} value - (hence, the number of samples can vary from history to history) - \item filteredSecondDerivative: samples are located where the second derivative is greater than the specified \xmlNode{tolerance} value - (hence, the number of samples can vary from history to history) - \end{itemize} - \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, number of samples (required only for the following sampling - types: uniform, firstDerivative secondDerivative) - \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable - \item \xmlNode{interpolation}, \xmlDesc{string, optional field}, type of interpolation to be employed for the history reconstruction - (required only for the following sampling types: uniform, firstDerivative secondDerivative). - Valid types of interpolation to specified: linear, nearest, zero, slinear, quadratic, cubic, intervalAverage - \item \xmlNode{tolerance}, \xmlDesc{string, optional field}, tolerance level (required only for the following sampling types: - filteredFirstDerivative or filteredSecondDerivative) -\end{itemize} - -\paragraph{Method: HistorySetSync} -This Post-Processor performs the conversion from HistorySet to HistorySet -The conversion is made so that all histories are synchronized in time. -It can be used to allow the histories to be sampled at the same time instant. - -There are two possible synchronization methods, specified through the \xmlNode{syncMethod} node. If the -\xmlNode{syncMethod} is \xmlString{grid}, a \xmlNode{numberOfSamples} node is specified, -which yields an equally-spaced grid of time points. The output values for these points will be linearly derived -using nearest sampled time points, and the new HistorySet will contain only the new grid points. - -The other methods are used by specifying \xmlNode{syncMethod} as \xmlString{all}, \xmlString{min}, or -\xmlString{max}. For \xmlString{all}, the postprocessor will iterate through the -existing histories, collect all the time points used in any of them, and use these as the new grid on which to -establish histories, retaining all the exact original values and interpolating linearly where necessary. -In the event of \xmlString{min} or \xmlString{max}, the postprocessor will find the smallest or largest time -history, respectively, and use those time values as nodes to interpolate between. 
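To make the \xmlString{all} synchronization strategy concrete, here is a minimal Python sketch (illustrative names and data layout; not the RAVEN implementation): the new grid is the union of every history's time points, and each variable is linearly interpolated onto it.

\begin{lstlisting}[language=python]
# Illustrative sketch of syncMethod 'all' (not RAVEN code).
import numpy as np

def syncAll(histories, pivot='time'):
  # union of all time grids, sorted and de-duplicated
  grid = np.unique(np.concatenate([np.asarray(h[pivot]) for h in histories]))
  synced = []
  for h in histories:
    newHist = {pivot: grid}
    for var, vals in h.items():
      if var != pivot:
        # exact original values are retained on their own grid points;
        # new points are filled in by linear interpolation
        newHist[var] = np.interp(grid, h[pivot], vals)
    synced.append(newHist)
  return synced
\end{lstlisting}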
- -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, required field}, ID of the temporal variable - \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended) - \item \xmlNode{syncMethod}, \xmlDesc{string, required field}, synchronization strategy to employ (see - description above). Options are \xmlString{grid}, \xmlString{all}, \xmlString{max}, \xmlString{min}. - \item \xmlNode{numberOfSamples}, \xmlDesc{integer, optional field}, required if \xmlNode{syncMethod} is - \xmlString{grid}, number of new time samples -\end{itemize} - -\paragraph{Method: HistorySetSnapShot} -This Post-Processor performs a conversion from HistorySet to PointSet. -The conversion is made so that each history $H$ is converted to a single point $P$. -There are several methods that can be employed to choose the single point from the history: -\begin{itemize} - \item min: Take a time slice when the \xmlNode{pivotVar} is at its smallest value, - \item max: Take a time slice when the \xmlNode{pivotVar} is at its largest value, - \item average: Take a time slice when the \xmlNode{pivotVar} is at its time-weighted average value, - \item value: Take a time slice when the \xmlNode{pivotVar} \emph{first passes} its specified value, - \item timeSlice: Take a time slice index from the sampled time instance space. -\end{itemize} -To demonstrate the timeSlice, assume that each history H is a dict of n output variables $x_1=[...], -x_n=[...]$, then the resulting point P is at time instant index t: $P=[x_1[t],...,x_n[t]]$. - -Choosing one the these methods for the \xmlNode{type} node will take a time slice for all the variables in the -output space based on the provided parameters. Alternatively, a \xmlString{mixed} type can be used, in which -each output variable can use a different time slice parameter. In other words, you can take the max of one -variable while taking the minimum of another, etc. - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{type}, \xmlDesc{string, required field}, type of operation: \xmlString{min}, \xmlString{max}, - \xmlString{average}, \xmlString{value}, \xmlString{timeSlice}, or \xmlString{mixed} - \item \xmlNode{extension}, \xmlDesc{string, required field}, type of extension when the sync process goes outside the boundaries of the history (zeroed or extended) - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, name of the temporal variable. Required for the - \xmlString{average} and \xmlString{timeSlice} methods. -\end{itemize} - -If a \xmlString{timeSlice} type is in use, the following nodes also are required: -\begin{itemize} - \item \xmlNode{timeInstant}, \xmlDesc{integer, required field}, required and only used in the - \xmlString{timeSlice} type. 
Location of the time slice (integer index)
-  \item \xmlNode{numberOfSamples}, \xmlDesc{integer, required field}, number of samples
-\end{itemize}
-
-If instead a \xmlString{min}, \xmlString{max}, \xmlString{average}, or \xmlString{value} is used, the following nodes
-are also required:
-\begin{itemize}
-  \item \xmlNode{pivotVar}, \xmlDesc{string, required field}, Name of the chosen indexing variable (the
-  variable whose min, max, average, or value is used to determine the time slice)
-  \item \xmlNode{pivotVal}, \xmlDesc{float, optional field}, required for \xmlString{value} type, the value for the chosen variable
-\end{itemize}
-
-Lastly, if a \xmlString{mixed} approach is used, the following nodes apply:
-\begin{itemize}
-  \item \xmlNode{max}, \xmlDesc{string, optional field}, the names of variables whose output should be their
-  own maximum value within the history.
-  \item \xmlNode{min}, \xmlDesc{string, optional field}, the names of variables whose output should be their
-  own minimum value within the history.
-  \item \xmlNode{average}, \xmlDesc{string, optional field}, the names of variables whose output should be their
-  own average value within the history. Note that a \xmlNode{pivotParameter} node is required to perform averages.
-  \item \xmlNode{value}, \xmlDesc{string, optional field}, the names of variables whose output should be taken
-  at a time slice determined by another variable. As with the non-mixed \xmlString{value} type, the first
-  time the \xmlAttr{pivotVar} crosses the specified \xmlAttr{pivotVal} will be the time slice taken.
-  This node requires two attributes, if used:
-  \begin{itemize}
-    \item \xmlAttr{pivotVar}, \xmlDesc{string, required field}, the name of the variable on which the time
-    slice will be performed. That is, if we want the value of $y$ when $t=0.245$,
-    this attribute would be \xmlString{t}.
-    \item \xmlAttr{pivotVal}, \xmlDesc{float, required field}, the value of the \xmlAttr{pivotVar} on which the time
-    slice will be performed. That is, if we want the value of $y$ when $t=0.245$,
-    this attribute would be \xmlString{0.245}.
-  \end{itemize}
-  Note that all the outputs of the \xmlNode{DataObject} output of this postprocessor must be listed under one
-  of the \xmlString{mixed} node types in order for values to be returned.
-\end{itemize}
-
-\textbf{Example (mixed):}
-This example will output the average value of $x$ for $x$, the value of $y$ at
-time$=0.245$ for $y$, and the value of $z$ at $x=4.0$ for $z$.
-\begin{lstlisting}[style=XML,morekeywords={subType,debug,name,class,type}]
-<Simulation>
-  ...
-  <Models>
-    ...
-    <PostProcessor name="snapShotMixed" subType="InterfacedPostProcessor">
-      <method>HistorySetSnapShot</method>
-      <type>mixed</type>
-      <average>x</average>
-      <value pivotVar="time" pivotVal="0.245">y</value>
-      <value pivotVar="x" pivotVal="4.0">z</value>
-      <pivotParameter>time</pivotParameter>
-      <extension>zeroed</extension>
-    </PostProcessor>
-    ...
-  </Models>
-  ...
-</Simulation>
-\end{lstlisting}
-
-
-\paragraph{Method: HS2PS}
-
-This Post-Processor performs a conversion from HistorySet to PointSet.
-The conversion is made so that each history $H$ is converted to a single point $P$.
-Assume that each history $H$ is a dict of $n$ output variables $x_1=[...],x_n=[...]$, then the resulting point $P$ is $P=concat(x_1,...,x_n)$.
-Note: it is here assumed that all histories have been sync so that they have the same length, start point and end point. If you are not sure, do a pre-processing the the original history set.
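The \xmlString{value} snapshot discussed above reduces, in array terms, to locating the first index where the pivot variable crosses the requested value and slicing every variable there. A hypothetical Python sketch follows (names and data layout are assumptions, not the RAVEN implementation):

\begin{lstlisting}[language=python]
# Illustrative sketch of the 'value' snapshot (not RAVEN code):
# slice all variables where pivotVar first passes pivotVal.
import numpy as np

def valueSnapshot(history, pivotVar, pivotVal):
  v = np.asarray(history[pivotVar])
  # indices where the sign of (v - pivotVal) changes, i.e. crossings
  crossings = np.where(np.diff(np.sign(v - pivotVal)) != 0)[0]
  # first crossing if one exists, otherwise the closest sample
  idx = crossings[0] + 1 if crossings.size else int(np.argmin(np.abs(v - pivotVal)))
  return {var: np.asarray(vals)[idx] for var, vals in history.items()}
\end{lstlisting}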
- -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified (min, max, avg and value case): - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable (only for avg) -\end{itemize} - -\paragraph{Method: TypicalHistoryFromHistorySet} -This Post-Processor performs a simplified procedure of \cite{wilcox2008users} to form a ``typical'' time series from multiple time series. The input should be a HistorySet, with each history in the HistorySet synchronized. For HistorySet that is not synchronized, use Post-Processor method \textbf{HistorySetSync} to synchronize the data before running this method. - -Each history in input HistorySet is first converted to multiple histories each has maximum time specified in \xmlNode{outputLen} (see below). Each converted history $H_i$ is divided into a set of subsequences $\{H_i^j\}$, and the division is guided by the \xmlNode{subseqLen} node specified in the input XML. The value of \xmlNode{subseqLen} should be a list of positive numbers that specify the length of each subsequence. If the number of subsequence for each history is more than the number of values given in \xmlNode{subseqLen}, the values in \xmlNode{subseqLen} would be reused. - -For each variable $x$, the method first computes the empirical CDF (cumulative density function) by using all the data values of $x$ in the HistorySet. This CDF is termed as long-term CDF for $x$. Then for each subsequence $H_i^j$, the method computes the empirical CDF by using all the data values of $x$ in $H_i^j$. This CDF is termed as subsequential CDF. For the first interval window (i.e., $j=1$), the method computes the Finkelstein-Schafer (FS) statistics \cite{finkelstein1971improved} between the long term CDF and the subsequential CDF of $H_i^1$ for each $i$. The FS statistics is defined as following. -\begin{align*} -FS & = \sum_x FS_x\\ -FS_x &= \frac{1}{N}\sum_{n=1}^N\delta_n -\end{align*} -where $N$ is the number of value reading in the empirical CDF and $\delta_n$ is the absolute difference between the long term CDF and the subsequential CDF at value $x_n$. The subsequence $H_i^1$ with minimal FS statistics will be selected as the typical subsequence for the interval window $j=1$. Such process repeats for $j=2,3,\dots$ until all subsequences have been processed. Then all the typical subsequences will be concatenated to form a complete history. - -In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, -independent of the \xmlAttr{subType} specified: - -\begin{itemize} - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable - \default{Time} - \item \xmlNode{subseqLen}, \xmlDesc{integers, required field}, length of the divided subsequence (see above) - \item \xmlNode{outputLen}, \xmlDesc{integer, optional field}, maximum value of the temporal variable for the generated typical history - \default{Maximum value of the variable with name of \xmlNode{pivotParameter}} -\end{itemize} - -For example, consider history of data collected over three years in one-second increments, -where the user wants a single \emph{typical year} extracted from the data. -The user wants this data constructed by combining twelve equal \emph{typical month} -segments. 
In this case, the parameter \xmlNode{outputLen} should be \texttt{31536000} (the number of seconds
-in a year), while the parameter \xmlNode{subseqLen} should be \texttt{2592000} (the number of seconds in a
-month). Using a value for \xmlNode{subseqLen} that is either much, much smaller than \xmlNode{outputLen} or
-of equal size to \xmlNode{outputLen} might have unexpected results. In general, we recommend using a
-\xmlNode{subseqLen} that is roughly an order of magnitude smaller than \xmlNode{outputLen}.
-
-\paragraph{Method: dataObjectLabelFilter}
-This Post-Processor allows to filter the portion of a dataObject, either PointSet or HistorySet, with a given clustering label.
-A clustering algorithm associates a unique cluster label to each element of the dataObject (PointSet or HistorySet).
-This cluster label is a natural number ranging from $0$ (or $1$ depending on the algorithm) to $N$ where $N$ is the number of obtained clusters.
-Recall that some clustering algorithms (e.g., K-Means) receive $N$ as input while others (e.g., Mean-Shift) determine $N$ after clustering has been performed.
-Thus, this Post-Processor is naturally employed after a data-mining clustering techniques has been performed on a dataObject so that each clusters
-can be analyzed separately.
-
-In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
-independently of the \xmlAttr{subType} specified:
-
-\begin{itemize}
-  \item \xmlNode{label}, \xmlDesc{string, required field}, name of the clustering label
-  \item \xmlNode{clusterIDs}, \xmlDesc{integers, required field}, ID of the selected clusters. Note that more than one ID can be provided as input
-\end{itemize}
diff --git a/doc/user_manual/PostProcessors/TypicalHistoryFromHistorySet.tex b/doc/user_manual/PostProcessors/TypicalHistoryFromHistorySet.tex
new file mode 100644
index 0000000000..f9de08f658
--- /dev/null
+++ b/doc/user_manual/PostProcessors/TypicalHistoryFromHistorySet.tex
@@ -0,0 +1,35 @@
+\subsubsection{TypicalHistoryFromHistorySet}
+\label{TypicalHistoryFromHistorySet}
+
+This Post-Processor performs a simplified version of the procedure of \cite{wilcox2008users} to form a ``typical'' time series from multiple time series. The input should be a HistorySet, with each history in the HistorySet synchronized. For a HistorySet that is not synchronized, use the Post-Processor \textbf{HistorySetSync} to synchronize the data before running this method.
+
+Each history in the input HistorySet is first converted to multiple histories, each of which has the maximum time specified in \xmlNode{outputLen} (see below). Each converted history $H_i$ is divided into a set of subsequences $\{H_i^j\}$, and the division is guided by the \xmlNode{subseqLen} node specified in the input XML. The value of \xmlNode{subseqLen} should be a list of positive numbers that specify the length of each subsequence. If the number of subsequences for each history is greater than the number of values given in \xmlNode{subseqLen}, the values in \xmlNode{subseqLen} are reused.
+
+For each variable $x$, the method first computes the empirical CDF (cumulative distribution function) by using all the data values of $x$ in the HistorySet. This CDF is termed the long-term CDF for $x$. Then for each subsequence $H_i^j$, the method computes the empirical CDF by using all the data values of $x$ in $H_i^j$. This CDF is termed the subsequential CDF.
For the first interval window (i.e., $j=1$), the method computes the Finkelstein-Schafer (FS) statistic \cite{finkelstein1971improved} between the long-term CDF and the subsequential CDF of $H_i^1$ for each $i$. The FS statistic is defined as follows.
+\begin{align*}
+FS & = \sum_x FS_x\\
+FS_x &= \frac{1}{N}\sum_{n=1}^N\delta_n
+\end{align*}
+where $N$ is the number of value readings in the empirical CDF and $\delta_n$ is the absolute difference between the long-term CDF and the subsequential CDF at value $x_n$. The subsequence $H_i^1$ with the minimal FS statistic is selected as the typical subsequence for the interval window $j=1$. This process repeats for $j=2,3,\dots$ until all subsequences have been processed. Then all the typical subsequences are concatenated to form a complete history.
+
+\ppType{TypicalHistoryFromHistorySet}{TypicalHistoryFromHistorySet}
+
+In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required,
+independent of the \xmlAttr{subType} specified:
+
+\begin{itemize}
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable
+  \default{Time}
+  \item \xmlNode{subseqLen}, \xmlDesc{integers, required field}, length of the divided subsequence (see above)
+  \item \xmlNode{outputLen}, \xmlDesc{integer, optional field}, maximum value of the temporal variable for the generated typical history
+  \default{Maximum value of the variable with name of \xmlNode{pivotParameter}}
+\end{itemize}
+
+For example, consider a history of data collected over three years in one-second increments,
+where the user wants a single \emph{typical year} extracted from the data.
+The user wants this data constructed by combining twelve equal \emph{typical month}
+segments. In this case, the parameter \xmlNode{outputLen} should be \texttt{31536000} (the number of seconds
+in a year), while the parameter \xmlNode{subseqLen} should be \texttt{2592000} (the number of seconds in a
+month). Using a value for \xmlNode{subseqLen} that is either much, much smaller than \xmlNode{outputLen} or
+of equal size to \xmlNode{outputLen} might have unexpected results. In general, we recommend using a
+\xmlNode{subseqLen} that is roughly an order of magnitude smaller than \xmlNode{outputLen}.
diff --git a/doc/user_manual/PostProcessors/dataObjectLabelFilter.tex b/doc/user_manual/PostProcessors/dataObjectLabelFilter.tex
new file mode 100644
index 0000000000..d4f79f3a97
--- /dev/null
+++ b/doc/user_manual/PostProcessors/dataObjectLabelFilter.tex
@@ -0,0 +1,19 @@
+\subsubsection{dataObjectLabelFilter}
+\label{dataObjectLabelFilter}
+
+This Post-Processor allows the user to filter the portion of a dataObject, either PointSet or HistorySet, with a given clustering label.
+A clustering algorithm associates a unique cluster label to each element of the dataObject (PointSet or HistorySet).
+This cluster label is a natural number ranging from $0$ (or $1$ depending on the algorithm) to $N$, where $N$ is the number of obtained clusters.
+Recall that some clustering algorithms (e.g., K-Means) receive $N$ as input while others (e.g., Mean-Shift) determine $N$ after clustering has been performed.
+Thus, this Post-Processor is naturally employed after a data-mining clustering technique has been performed on a dataObject so that each cluster
+can be analyzed separately.
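In array terms the filtering is a simple selection; the following minimal Python sketch is illustrative only (the function name and data layout are assumptions, not the RAVEN implementation):

\begin{lstlisting}[language=python]
# Illustrative sketch of dataObjectLabelFilter (not RAVEN code):
# keep only realizations whose cluster label is among the selected IDs.
def filterByLabel(realizations, label, clusterIDs):
  """
    @ In, realizations, list(dict), each maps variable name -> value
    @ In, label, str, name of the clustering label variable
    @ In, clusterIDs, list(int), cluster IDs to keep
    @ Out, list(dict), realizations belonging to the selected clusters
  """
  return [rlz for rlz in realizations if rlz[label] in clusterIDs]

data = [{'x': 1.0, 'labels': 0}, {'x': 2.0, 'labels': 1}, {'x': 3.0, 'labels': 1}]
print(filterByLabel(data, 'labels', [1]))  # keeps the two cluster-1 points
\end{lstlisting}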
+ +\ppType{dataObjectLabelFilter}{dataObjectLabelFilter} + +In the \xmlNode{PostProcessor} input block, the following XML sub-nodes are required, +independently of the \xmlAttr{subType} specified: + +\begin{itemize} + \item \xmlNode{label}, \xmlDesc{string, required field}, name of the clustering label + \item \xmlNode{clusterIDs}, \xmlDesc{integers, required field}, ID of the selected clusters. Note that more than one ID can be provided as input +\end{itemize} diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex index 6f523fdc21..e9e247a9f7 100644 --- a/doc/user_manual/postprocessor.tex +++ b/doc/user_manual/postprocessor.tex @@ -19,7 +19,6 @@ \subsection{PostProcessor} \item \textbf{LimitSurfaceIntegral} \item \textbf{External} \item \textbf{TopologicalDecomposition} - %\item \textbf{RavenOutput} \item \textbf{DataMining} \item \textbf{RiskMeasureDiscrete} \item \textbf{Metric} @@ -1087,26 +1086,6 @@ \subsubsection{TopologicalDecomposition} %%%%% PP DataMining %%%%%%% \input{PostProcessors/DataMining.tex} -%%%%% PP PrintCSV %%%%%%% -%\paragraph{PrintCSV} -%\label{PrintCSV} -%TO BE MOVED TO STEP ``IOSTEP'' -%%%%% PP LoadCsvIntoInternalObject %%%%%%% -%\paragraph{LoadCsvIntoInternalObject} -%\label{LoadCsvIntoInternalObject} -%TO BE MOVED TO STEP ``IOSTEP'' -% - -%%%%% Risk Measures Discrete PP %%%%%%%%%% -\input{PostProcessors/DiscreteRiskMeasures.tex} - -%%%%% HistorySetDelay %%%%%% -\input{PostProcessors/HistorySetDelay.tex} - -%%%%% PP External %%%%%%% -\input{PostProcessors/InterfacedPostProcessors.tex} - - %%%%%%%%%%%%%% ParetoFrontier PP %%%%%%%%%%%%%%%%%%% \subsubsection{ParetoFrontier} @@ -2209,3 +2188,35 @@ \subsubsection{SampleSelector} %%%%% PP EconomicRatio %%%%%%% \input{EconomicRatio.tex} + +%%%%% Risk Measures Discrete PP %%%%%%%%%% +\input{PostProcessors/DiscreteRiskMeasures.tex} + +%%%%% HistorySetDelay %%%%%% +\input{PostProcessors/HistorySetDelay.tex} + +%%%%% HStoPSOperator %%%%%% +\input{PostProcessors/HStoPSOperator.tex} + +%%%%% HistorySetSampling %%%%%% +\input{PostProcessors/HistorySetSampling.tex} + +%%%%% HistorySetSync %%%%%% +\input{PostProcessors/HistorySetSync.tex} + +%%%%% HistorySetSnapShot %%%%%% +\input{PostProcessors/HistorySetSnapShot.tex} + +%%%%% HS2PS %%%%%% +\input{PostProcessors/HS2PS.tex} + +%%%%% TypicalHistoryFromHistorySet %%%%%% +\input{PostProcessors/TypicalHistoryFromHistorySet.tex} + +%%%%% dataObjectLabelFilter %%%%%% +\input{PostProcessors/dataObjectLabelFilter.tex} + +%%%%%%%%%%%%%% InterfacedPostProcessors %%%%%%%%%%%%%%%% +% To be replaced by the PostProcessor Plugin +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%\input{PostProcessors/InterfacedPostProcessors.tex} From bf6a4a067a231dd7fa30d3bcc9cf9526b05d861b Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 7 May 2021 16:23:54 -0600 Subject: [PATCH 47/51] update plugin --- plugins/HERON | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/HERON b/plugins/HERON index 1257af23da..12ce389efc 160000 --- a/plugins/HERON +++ b/plugins/HERON @@ -1 +1 @@ -Subproject commit 1257af23da7908c86172d08b9c409d85d0c1af5d +Subproject commit 12ce389efce1253e1a5f6d795ddd76d272be6734 From 874944ab29155232b42efa0dd262949b3144f41d Mon Sep 17 00:00:00 2001 From: "Wang, Congjian" Date: Fri, 7 May 2021 16:29:19 -0600 Subject: [PATCH 48/51] revert HEROM commit --- plugins/HERON | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/HERON b/plugins/HERON index 12ce389efc..8c8be8139b 160000 --- a/plugins/HERON +++ 
b/plugins/HERON
@@ -1 +1 @@
-Subproject commit 12ce389efce1253e1a5f6d795ddd76d272be6734
+Subproject commit 8c8be8139b52e8bf395508b0097b137369d32df4

From 00db572601c5d16394157ee8ad6dd9c3b00e0364 Mon Sep 17 00:00:00 2001
From: "Wang, Congjian"
Date: Mon, 10 May 2021 20:40:29 -0600
Subject: [PATCH 49/51] update doc

---
 doc/user_manual/PostProcessors/HS2PS.tex | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/doc/user_manual/PostProcessors/HS2PS.tex b/doc/user_manual/PostProcessors/HS2PS.tex
index 699412a418..b49f406244 100644
--- a/doc/user_manual/PostProcessors/HS2PS.tex
+++ b/doc/user_manual/PostProcessors/HS2PS.tex
@@ -3,8 +3,9 @@ \subsubsection{HS2PS}

 This Post-Processor performs a conversion from HistorySet to PointSet.
 The conversion is made so that each history $H$ is converted to a single point $P$.
-Assume that each history $H$ is a dict of $n$ output variables $x_1=[...],x_n=[...]$, then the resulting point $P$ is $P=concat(x_1,...,x_n)$.
-Note: it is here assumed that all histories have been sync so that they have the same length, start point and end point. If you are not sure, do a pre-processing the the original history set.
+Assume that each history $H$ is a dictionary (mapping) of $n$ output variables $x_1=[...],x_n=[...]$, then the resulting point $P$ is $P=concat(x_1,...,x_n)$.
+Note: it is here assumed that all histories have been synced so that they have the same length, start point and end point.
+If you are not sure, pre-process the original history set first.

 \ppType{HS2PS}{HS2PS}

From f58071b9fdf1bca095f2b3bcbc75f56fc36d8542 Mon Sep 17 00:00:00 2001
From: "Wang, Congjian"
Date: Tue, 11 May 2021 15:48:27 -0600
Subject: [PATCH 50/51] resolve plugin configure file issue within plugin_handler.py

---
 scripts/plugin_handler.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/scripts/plugin_handler.py b/scripts/plugin_handler.py
index 81a39710d9..45cce1b6a1 100755
--- a/scripts/plugin_handler.py
+++ b/scripts/plugin_handler.py
@@ -125,8 +125,10 @@ def tellPluginAboutRaven(loc):
   if ravenLoc is None:
     ravenLoc = xmlUtils.newNode('FrameworkLocation')
     root.append(ravenLoc)
-  ravenLoc.text = os.path.abspath(os.path.expanduser(frameworkDir))
-  xmlUtils.toFile(configFile, root)
+  ravenFrameworkLoc = os.path.abspath(os.path.expanduser(frameworkDir))
+  if ravenLoc.text != ravenFrameworkLoc:
+    ravenLoc.text = ravenFrameworkLoc
+    xmlUtils.toFile(configFile, root)
   return ravenLoc.text

 def loadPluginTree():

From 5f32bb6dbe4459add4ab22372e6f56b199e7858b Mon Sep 17 00:00:00 2001
From: "Wang, Congjian"
Date: Tue, 11 May 2021 15:57:31 -0600
Subject: [PATCH 51/51] update plugin base class location

---
 framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py | 2 +-
 .../PostProcessors/PostProcessorFunctions/HStoPSOperator.py     | 2 +-
 .../PostProcessors/PostProcessorFunctions/HistorySetDelay.py    | 2 +-
 .../PostProcessors/PostProcessorFunctions/HistorySetSampling.py | 2 +-
 .../PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py | 2 +-
 .../PostProcessors/PostProcessorFunctions/HistorySetSync.py     | 2 +-
 .../PostProcessorFunctions/TypicalHistoryFromHistorySet.py      | 2 +-
 .../PostProcessorFunctions/dataObjectLabelFilter.py             | 2 +-
 .../PostProcessors/PostProcessorFunctions/testInterfacedPP.py   | 2 +-
 .../PostProcessorFunctions/testInterfacedPP_PointSet.py         | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py
b/framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py index c3a6827308..d08bc85470 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HS2PS.py @@ -21,7 +21,7 @@ #Internal Modules--------------------------------------------------------------- from utils import InputData, InputTypes -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase #Internal Modules End----------------------------------------------------------- class HS2PS(PostProcessorPluginBase): diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/HStoPSOperator.py b/framework/Models/PostProcessors/PostProcessorFunctions/HStoPSOperator.py index 1cb29e3db4..1e5d2b386b 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/HStoPSOperator.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HStoPSOperator.py @@ -23,7 +23,7 @@ #External Modules End-------------------------------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes #Internal Modules End----------------------------------------------------------- diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py index d97d8bda44..1d98d24544 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetDelay.py @@ -24,7 +24,7 @@ import xarray as xr from utils import InputData, InputTypes -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase class HistorySetDelay(PostProcessorPluginBase): """ diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSampling.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSampling.py index 50e3c54918..fef49da49b 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSampling.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSampling.py @@ -22,7 +22,7 @@ from scipy import integrate import copy -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes class HistorySetSampling(PostProcessorPluginBase): diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py index b85dc71618..f28a540ba5 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSnapShot.py @@ -21,7 +21,7 @@ import copy import importlib -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase # import HistorySetSync as HSS from Models.PostProcessors import Factory as interfaceFactory from utils import InputData, InputTypes diff --git 
a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py index 348f123c8e..51d79b4b62 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/HistorySetSync.py @@ -22,7 +22,7 @@ import numpy as np #External Modules End-------------------------------------------------------------------------------- -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes class HistorySetSync(PostProcessorPluginBase): diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/TypicalHistoryFromHistorySet.py b/framework/Models/PostProcessors/PostProcessorFunctions/TypicalHistoryFromHistorySet.py index f7e81f2978..11f806b15c 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/TypicalHistoryFromHistorySet.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/TypicalHistoryFromHistorySet.py @@ -20,7 +20,7 @@ from collections import defaultdict from functools import partial -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import mathUtils, utils, InputData, InputTypes class TypicalHistoryFromHistorySet(PostProcessorPluginBase): diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/dataObjectLabelFilter.py b/framework/Models/PostProcessors/PostProcessorFunctions/dataObjectLabelFilter.py index e8da63d4eb..9621b02095 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/dataObjectLabelFilter.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/dataObjectLabelFilter.py @@ -20,7 +20,7 @@ from scipy import interpolate import copy -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes class dataObjectLabelFilter(PostProcessorPluginBase): diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP.py b/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP.py index cc5f4d02eb..eba86bd3dc 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP.py @@ -18,7 +18,7 @@ import copy import numpy as np -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes class testInterfacedPP(PostProcessorPluginBase): diff --git a/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP_PointSet.py b/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP_PointSet.py index 918bace454..9ba13dfa00 100644 --- a/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP_PointSet.py +++ b/framework/Models/PostProcessors/PostProcessorFunctions/testInterfacedPP_PointSet.py @@ -16,7 +16,7 @@ ''' import copy -from PluginsBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase +from PluginBaseClasses.PostProcessorPluginBase import PostProcessorPluginBase from utils import InputData, InputTypes class testInterfacedPP_PointSet(PostProcessorPluginBase):