diff --git a/framework/CodeInterfaces/RAVEN/RAVENInterface.py b/framework/CodeInterfaces/RAVEN/RAVENInterface.py
index 1d2d276094..3f02fca7fd 100644
--- a/framework/CodeInterfaces/RAVEN/RAVENInterface.py
+++ b/framework/CodeInterfaces/RAVEN/RAVENInterface.py
@@ -95,12 +95,12 @@ def _readMoreXML(self,xmlNode):
       # check for existence
       if not os.path.exists(source):
         raise IOError(self.printTag+' ERROR: the conversionModule "{}" was not found!'
-                      .format(self.extModForVarsManipulationPath))
+                      .format(source))
       # check module is imported
       checkImport = utils.importFromPath(source)
       if checkImport is None:
         raise IOError(self.printTag+' ERROR: the conversionModule "{}" failed on import!'
-                      .format(self.extModForVarsManipulationPath))
+                      .format(source))
       # check methods are in place
       noScalar = 'convertNotScalarSampledVariables' in checkImport.__dict__
       scalar = 'manipulateScalarSampledVariables' in checkImport.__dict__
diff --git a/framework/CustomModes/MPISimulationMode.py b/framework/CustomModes/MPISimulationMode.py
index f7121e951c..88828b57a3 100644
--- a/framework/CustomModes/MPISimulationMode.py
+++ b/framework/CustomModes/MPISimulationMode.py
@@ -66,6 +66,7 @@ def modifyInfo(self, runInfoDict):
        nodefile = os.environ["PBS_NODEFILE"]
      else:
        nodefile = self.__nodefile
+     self.raiseADebug('Setting up remote nodes based on "{}"'.format(nodefile))
      lines = open(nodefile,"r").readlines()
      #XXX This is an undocumented way to pass information back
      newRunInfo['Nodes'] = list(lines)
@@ -79,6 +80,7 @@ def modifyInfo(self, runInfoDict):
        newRunInfo['batchSize'] = maxBatchsize
        self.raiseAWarning("changing batchsize from "+str(oldBatchsize)+" to "+str(maxBatchsize)+" to fit on "+str(len(lines))+" processors")
      newBatchsize = newRunInfo['batchSize']
+     self.raiseADebug('Batch size is "{}"'.format(newBatchsize))
      if newBatchsize > 1:
        #need to split node lines so that numMPI nodes are available per run
        workingDir = runInfoDict['WorkingDir']
@@ -121,20 +123,23 @@ def __createAndRunQSUB(self, runInfoDict):
      @ Out, remoteRunCommand, dict, dictionary of command.
    """
    # Check if the simulation has been run in PBS mode and, in case, construct the proper command
-   #while true, this is not the number that we want to select
+   # determine the cores needed for the job
    if self.__coresNeeded is not None:
      coresNeeded = self.__coresNeeded
    else:
      coresNeeded = runInfoDict['batchSize']*runInfoDict['NumMPI']
+   # get the requested memory, if any
    if self.__memNeeded is not None:
      memString = ":mem="+self.__memNeeded
    else:
      memString = ""
-   #batchSize = runInfoDict['batchSize']
+   # raven/framework location
    frameworkDir = runInfoDict["FrameworkDir"]
+   # number of "threads"
    ncpus = runInfoDict['NumThreads']
+   # job title
    jobName = runInfoDict['JobName'] if 'JobName' in runInfoDict.keys() else 'raven_qsub'
-   #check invalid characters
+   ## fix up job title
    validChars = set(string.ascii_letters).union(set(string.digits)).union(set('-_'))
    if any(char not in validChars for char in jobName):
      raise IOError('JobName can only contain alphanumeric and "_", "-" characters! Received'+jobName)
@@ -142,20 +147,26 @@ def __createAndRunQSUB(self, runInfoDict):
    if len(jobName) > 15:
      jobName = jobName[:10]+'-'+jobName[-4:]
      print('JobName is limited to 15 characters; truncating to '+jobName)
-   #Generate the qsub command needed to run input
+   # Generate the qsub command needed to run input
+   ## raven_framework location
+   raven = os.path.abspath(os.path.join(frameworkDir,'..','raven_framework'))
+   ## generate the command, which will be passed into "args" of subprocess.call
    command = ["qsub","-N",jobName]+\
              runInfoDict["clusterParameters"]+\
              ["-l",
-              "select="+str(coresNeeded)+":ncpus="+str(ncpus)+":mpiprocs=1"+memString,
+              "select={}:ncpus={}:mpiprocs=1{}".format(coresNeeded,ncpus,memString),
              "-l","walltime="+runInfoDict["expectedTime"],
              "-l","place="+self.__place,"-v",
-             'COMMAND="../raven_framework '+
+             'COMMAND="{} '.format(raven)+
              " ".join(runInfoDict["SimulationFiles"])+'"',
              runInfoDict['RemoteRunCommand']]
-   #Change to frameworkDir so we find raven_qsub_command.sh
+   # Set parameters for the run command
    remoteRunCommand = {}
-   remoteRunCommand["cwd"] = frameworkDir
+   ## directory to start in, where the input file is
+   remoteRunCommand["cwd"] = runInfoDict['InputDir']
+   ## command to run in that directory
    remoteRunCommand["args"] = command
+   ## print out for debugging
    print("remoteRunCommand",remoteRunCommand)
    return remoteRunCommand
diff --git a/framework/Simulation.py b/framework/Simulation.py
index ddb7c322b1..41bba4ac99 100644
--- a/framework/Simulation.py
+++ b/framework/Simulation.py
@@ -516,6 +516,7 @@ def initialize(self):
      @ Out, None
    """
    #move the full simulation environment in the working directory
+   self.raiseADebug('Moving to working directory:',self.runInfoDict['WorkingDir'])
    os.chdir(self.runInfoDict['WorkingDir'])
    #add also the new working dir to the path
    sys.path.append(os.getcwd())
@@ -608,6 +609,9 @@ def __readRunInfo(self,xmlNode,runInfoSkip,xmlFilename):
        else:
          self.runInfoDict['printInput'] = text+'.xml'
      elif element.tag == 'WorkingDir':
+       # first store the cwd, the "CallDir"
+       self.runInfoDict['CallDir'] = os.getcwd()
+       # then get the requested "WorkingDir"
        tempName = element.text
        if '~' in tempName:
          tempName = os.path.expanduser(tempName)
@@ -618,8 +622,11 @@ def __readRunInfo(self,xmlNode,runInfoSkip,xmlFilename):
        else:
          if xmlFilename == None:
            self.raiseAnError(IOError,'Relative working directory requested but xmlFilename is None.')
+         # store location of the input
          xmlDirectory = os.path.dirname(os.path.abspath(xmlFilename))
+         self.runInfoDict['InputDir'] = xmlDirectory
          rawRelativeWorkingDir = element.text.strip()
+         # working dir is file location + relative working dir
          self.runInfoDict['WorkingDir'] = os.path.join(xmlDirectory,rawRelativeWorkingDir)
        utils.makeDir(self.runInfoDict['WorkingDir'])
      elif element.tag == 'maxQueueSize':
diff --git a/framework/raven_qsub_command.sh b/framework/raven_qsub_command.sh
index 88ed93576d..e78ddfeeef 100755
--- a/framework/raven_qsub_command.sh
+++ b/framework/raven_qsub_command.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 
 if test -n "$PBS_O_WORKDIR"; then
+    echo Moving to working dir: ${PBS_O_WORKDIR}
     cd $PBS_O_WORKDIR
 fi
@@ -10,11 +11,8 @@ module load MVAPICH2/2.0.1-GCC-4.9.2
 ## also the name of the raven libraries conda environment
 source activate raven_libraries
 
-echo `conda env list`
-echo DEBUGG HERE IN RQC
-conda list
-
 which python
 which mpiexec
+echo ''
 
 echo $COMMAND
 $COMMAND
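For reference, a minimal sketch (not part of the patch) of the command list the revised __createAndRunQSUB assembles; every runInfoDict value below is illustrative only, and 'free' stands in for self.__place:

    import os

    # illustrative values; the real ones come from the RunInfo XML block
    runInfoDict = {
        'batchSize': 3, 'NumMPI': 2, 'NumThreads': 1, 'JobName': 'raven_qsub',
        'clusterParameters': ['-P', 'nst', '-j', 'oe'], 'expectedTime': '0:10:0',
        'RemoteRunCommand': 'raven_qsub_command.sh',
        'SimulationFiles': ['test_rom_trainer.xml', 'ext_dataobjects.xml'],
        'FrameworkDir': '/home/user/raven/framework', 'InputDir': '/home/user/input',
    }
    coresNeeded = runInfoDict['batchSize'] * runInfoDict['NumMPI']  # 3 runs x 2 MPI = 6
    # absolute path to raven_framework, replacing the old relative '../raven_framework'
    raven = os.path.abspath(os.path.join(runInfoDict['FrameworkDir'], '..', 'raven_framework'))
    command = (['qsub', '-N', runInfoDict['JobName']]
               + runInfoDict['clusterParameters']
               + ['-l', 'select={}:ncpus={}:mpiprocs=1'.format(coresNeeded, runInfoDict['NumThreads']),
                  '-l', 'walltime=' + runInfoDict['expectedTime'],
                  '-l', 'place=free', '-v',
                  'COMMAND="{} {}"'.format(raven, ' '.join(runInfoDict['SimulationFiles'])),
                  runInfoDict['RemoteRunCommand']])
    # the job now starts from the input directory instead of the framework directory
    remoteRunCommand = {'cwd': runInfoDict['InputDir'], 'args': command}
    print(remoteRunCommand)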
diff --git a/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/ext_dataobjects.xml b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/ext_dataobjects.xml
new file mode 100644
index 0000000000..547f6d322b
--- /dev/null
+++ b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/ext_dataobjects.xml
@@ -0,0 +1,34 @@
+  DeltaTimeScramToAux,DG1recoveryTime
+  OutputPlaceHolder
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold,time
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  OutputPlaceHolder
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  OutputPlaceHolder
+
+  DeltaTimeScramToAux,DG1recoveryTime
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold,time
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold
diff --git a/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/testConversionModule.py b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/testConversionModule.py
new file mode 100755
index 0000000000..5cf836b60f
--- /dev/null
+++ b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/testConversionModule.py
@@ -0,0 +1,31 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def manipulateScalarSampledVariables(sampledVars):
+  """
+    This method manipulates scalar variables; the user can create new variables
+    based on the ones sampled by RAVEN.
+    @ In, sampledVars, dict, dictionary of sampled variables ({"var1":value1,"var2":value2})
+    @ Out, None, the new variables are added directly to the "sampledVars" dictionary
+  """
+  sampledVars['Models|ROM@subType:SciKitLearn@name:ROM1|coef0'] = sampledVars['Models|ROM@subType:SciKitLearn@name:ROM1|C']/10.0
diff --git a/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/test_rom_trainer.xml b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/test_rom_trainer.xml
new file mode 100644
index 0000000000..65c38b0adf
--- /dev/null
+++ b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/test_rom_trainer.xml
@@ -0,0 +1,174 @@
+  test_rom_trainer
+  MC_for_rom_trainer, test_extract_for_rom_trainer,
+  test_rom_trainer, test_rom_trainerHS,
+  rom_MC, rom_MCHS,
+  test_rom_trainer_nd_interp,rom_MC_nd_interpolator
+  1
+
+  0
+  2000
+
+  0
+  1000
+
+  DeltaTimeScramToAux,DG1recoveryTime,time,CladTempThreshold
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold
+  svm|SVR
+  linear
+  10.0
+  0.0001
+  0.0
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold,time
+  svm|SVR
+  linear
+  10.0
+  0.0001
+  0.0
+
+  DeltaTimeScramToAux,DG1recoveryTime
+  CladTempThreshold
+  3
+
+  3
+  auxbackup
+  500
+
+  auxbackup
+  0.5 1.0 0.0
+  500
+
+  3
+  auxbackup
+  DG1backup
+
+  3
+  200286
+  auxbackup
+  DG1backup
+
+  inputHolder
+  PythonModule
+  RAVENmcCode3
+  MC_TEST_EXTRACT_STEP_FOR_ROM_TRAINER
+
+  MC_TEST_EXTRACT_STEP_FOR_ROM_TRAINER
+  MC_TEST_EXTRACT_STEP_FOR_ROM_TRAINER
+  Pointset_from_database_for_rom_trainer
+  Historyset_from_database_for_rom_trainer
+  ciccio
+  ciccioHS
+
+  Pointset_from_database_for_rom_trainer
+  ROM1
+
+  Historyset_from_database_for_rom_trainer
+  ROMHS
+
+  Pointset_from_database_for_rom_trainer
+  ROM2
+
+  data_for_sampling_empty_at_begin
+  ROM1
+  gridRom
+  outputMontecarloRom
+  outputMontecarloRom_dump
+
+  data_for_sampling_empty_at_begin
+  ROMHS
+  gridRom
+  outputMontecarloRomHS
+  outputMontecarloRomHS_dump
+
+  data_for_sampling_empty_at_begin_nd
+  ROM2
+  RAVENmcND
+  outputMontecarloRomND
+  outputMontecarloRomND_dump
+
+  csv
+  outputMontecarloRom
+
+  csv
+  outputMontecarloRomHS
+
+  csv
+  outputMontecarloRomND
+
+  csv
+  Pointset_from_database_for_rom_trainer
+
+  csv
+  Historyset_from_database_for_rom_trainer
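A usage sketch (illustrative only) of the conversion hook above, which RAVENInterface loads via utils.importFromPath and calls with the dictionary of sampled variables:

    from testConversionModule import manipulateScalarSampledVariables

    sampledVars = {'Models|ROM@subType:SciKitLearn@name:ROM1|C': 10.0}
    manipulateScalarSampledVariables(sampledVars)
    # the hook adds the derived coef0 entry in place: C/10.0 == 1.0
    assert sampledVars['Models|ROM@subType:SciKitLearn@name:ROM1|coef0'] == 1.0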
diff --git a/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/test_rom_trainer/TMI_fake.py b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/test_rom_trainer/TMI_fake.py
new file mode 100644
index 0000000000..5243f6b8e1
--- /dev/null
+++ b/tests/cluster_tests/RavenRunsRaven/raven_running_raven_internal_models/test_rom_trainer/TMI_fake.py
@@ -0,0 +1,27 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+import numpy
+
+def run(self, Input):
+  number_of_steps = 16
+  self.time = numpy.zeros(number_of_steps)
+  DeltaTimeScramToAux = Input["DeltaTimeScramToAux"]
+  DG1recoveryTime = Input["DG1recoveryTime"]
+  self.CladTempThreshold = numpy.zeros(number_of_steps)
+  self.UpperPlenumEnergy = numpy.zeros(number_of_steps)
+  for i in range(len(self.time)):
+    self.time[i] = 0.25*i
+    self.CladTempThreshold[i] = self.time[i]*50.0 + DeltaTimeScramToAux*200.0 + DG1recoveryTime*500.0
+    self.UpperPlenumEnergy[i] = self.time[i]*5.0 + DeltaTimeScramToAux*30.0 + DG1recoveryTime*40.0 + DeltaTimeScramToAux*DG1recoveryTime*5.0
diff --git a/tests/cluster_tests/RavenRunsRaven/test_raven_running_raven_int_models.xml b/tests/cluster_tests/RavenRunsRaven/test_raven_running_raven_int_models.xml
new file mode 100644
index 0000000000..6779cd9e22
--- /dev/null
+++ b/tests/cluster_tests/RavenRunsRaven/test_raven_running_raven_int_models.xml
@@ -0,0 +1,131 @@
+  framework/CodeInterfaceTests.RAVENrunningRAVEN_ROM
+  alfoa
+  2017-09-16
+  Models.Code.RAVEN
+    This test checks the functionality of the RAVEN code interface (RAVEN running RAVEN),
+    exercising the MPI execution of the SLAVE RAVEN runs. In this case, 3 simultaneous
+    SLAVE RAVEN runs (batchSize=3) are spawned, each using 2 processors (NumMPI=2).
+    The NumMPI XML node MUST BE provided if the SLAVE RAVEN runs are to use multiple processors!
+  raven_running_raven_internal_models
+  FirstMRun
+  5
+  mpi
+  0:10:0
+  -P nst -j oe
+
+  test_rom_trainer.xml
+  ext_dataobjects.xml
+
+  test_rom_trainer.xml
+  ext_dataobjects.xml
+  raven_running_rom
+  MC_external
+  testPrintHistorySet
+  test_external_db
+  testPrintHistorySet_dump
+  testPointSet
+  testPointSet_dump
+
+  %FRAMEWORK_DIR%/../raven_framework
+  outputMontecarloRom_dump,outputMontecarloRomHS_dump
+
+  Models|ROM@subType:SciKitLearn@name:ROM1|C
+
+  Models|ROM@subType:SciKitLearn@name:ROM1|C
+  Models|ROM@subType:SciKitLearn@name:ROM1|tol
+  Samplers|Grid@name:gridRom|constant@name:DG1recoveryTime
+
+  4
+  0.1
+
+  0.0001
+  0.000001
+
+  0
+  1000
+
+  5
+
+  C_distrib
+
+  toll_distrib
+
+  DG1backup
+
+  csv
+  testPrintHistorySet
+  input,output
+
+  csv
+  testPointSet
+  input,output
+
+  loss_factor,tollerance
+  OutputPlaceHolder
+
+  loss_factor,DeltaTimeScramToAux,tollerance,DG1recoveryTime
+  CladTempThreshold
+
+  loss_factor,DeltaTimeScramToAux,tollerance,DG1recoveryTime
+  CladTempThreshold
diff --git a/tests/cluster_tests/test_pbspro.sh b/tests/cluster_tests/test_pbspro.sh
index 3160bea515..58aba28786 100755
--- a/tests/cluster_tests/test_pbspro.sh
+++ b/tests/cluster_tests/test_pbspro.sh
@@ -37,55 +37,41 @@ wait_lines ()
         fails=$fails', '$NAME
         num_fails=$(($num_fails+1))
         printf '\n\nStandard Error:\n'
-        cat $RAVEN_FRAMEWORK_DIR/test_qsub.e*
+        cat test_qsub.e*
         printf '\n\nStandard Output:\n'
-        cat $RAVEN_FRAMEWORK_DIR/test_qsub.o*
+        cat test_qsub.o*
     fi
-    rm $RAVEN_FRAMEWORK_DIR/test_qsub.[eo]*
+    rm test_qsub.[eo]*
 }
 
 rm -Rf FirstMQRun/
-
 #REQUIREMENT_TEST R-IS-7
 python ../../framework/Driver.py test_mpiqsub_local.xml pbspro_mpi.xml cluster_runinfo.xml
-
 wait_lines 'FirstMQRun/[1-6]/*test.csv' 6 mpiqsub
 
 rm -Rf FirstMNRun/
-
 python ../../framework/Driver.py test_mpiqsub_nosplit.xml cluster_runinfo.xml
-
 wait_lines 'FirstMNRun/[1-6]/*.csv' 6 mpiqsub_nosplit
 
 rm -Rf FirstMLRun/
-
 python ../../framework/Driver.py test_mpiqsub_limitnode.xml cluster_runinfo.xml
-
 wait_lines 'FirstMLRun/[1-6]/*.csv' 6 mpiqsub_limitnode
 
 rm -Rf FirstMRun/
-
 qsub -P moose -l select=6:ncpus=4:mpiprocs=1 -l walltime=10:00:00 -l place=free -W block=true ./run_mpi_test.sh
-
 wait_lines 'FirstMRun/[1-6]/*test.csv' 6 mpi
 
 rm -Rf FirstPRun/
-
 python ../../framework/Driver.py test_pbs.xml cluster_runinfo.xml
-
 wait_lines 'FirstPRun/[1-6]/*test.csv' 6 pbsdsh
 
 rm -Rf FirstMFRun/
-
 python ../../framework/Driver.py test_mpiqsub_flex.xml cluster_runinfo.xml
-
 wait_lines 'FirstMFRun/[1-6]/*.csv' 6 mpiqsub_flex
 
 rm -Rf FirstMForcedRun/
-
 python ../../framework/Driver.py test_mpiqsub_forced.xml cluster_runinfo.xml
-
 wait_lines 'FirstMForcedRun/[1-6]/*.csv' 6 mpiqsub_forced
 
 ######################################
@@ -94,84 +80,60 @@ wait_lines 'FirstMForcedRun/[1-6]/*.csv' 6 mpiqsub_forced
 # first test (external model in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelExtModel/*.csv
-
 #REQUIREMENT_TEST R-IS-8
 python ../../../framework/Driver.py test_internal_parallel_extModel.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'InternalParallelExtModel/*.csv' 28 paralExtModel
-
 cd ..
 
 # second test (ROM in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelScikit/*.csv
-
 #REQUIREMENT_TEST R-IS-9
 python ../../../framework/Driver.py test_internal_parallel_ROM_scikit.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'InternalParallelScikit/*.csv' 2 paralROM
-
 cd ..
 # third test (PostProcessor in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelPostProcessorLS/*.csv
-
 python ../../../framework/Driver.py test_internal_parallel_PP_LS.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'InternalParallelPostProcessorLS/*.csv' 4 parallelPP
-
 cd ..
 
 # fourth test (Topology Picard in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelMSR/*.csv
-
 python ../../../framework/Driver.py test_internal_MSR.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'InternalParallelMSR/*.csv' 1 parallelMSR
-
 cd ..
 
 # fifth test (Ensemble Model Picard in parallel)
 cd InternalParallel/
 rm -Rf metaModelNonLinearParallel/*.png
-
 python ../../../framework/Driver.py test_ensemble_model_picard_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'metaModelNonLinearParallel/*.png' 3 parallelEnsemblePicard
-
 cd ..
 
 # sixth test (Ensemble Model Linear Picard in parallel)
 cd InternalParallel/
 rm -Rf metaModelLinearParallel/*.png
-
 python ../../../framework/Driver.py test_ensemble_model_linear_internal_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'metaModelLinearParallel/*.png' 2 parallelEnsembleLinear
-
 cd ..
 
 # seventh test (HybridModel Code in parallel)
 cd InternalParallel/
 rm -Rf hybridModelCode/*.csv
-
 python ../../../framework/Driver.py test_hybrid_model_code.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'hybridModelCode/*.csv' 1 parallelHybridModelCode
-
 cd ..
 
 # eighth test (HybridModel External Model in parallel)
 cd InternalParallel/
 rm -Rf hybridModelExternal/*.csv
-
 python ../../../framework/Driver.py test_hybrid_model_external.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'hybridModelExternal/*.csv' 1 parallelHybridModelExternal
-
 cd ..
 
 ############################################
@@ -184,13 +146,17 @@ cd ..
 
 cd AdaptiveSobol/
 rm -Rf workdir/*
-
 python ../../../framework/Driver.py test_adapt_sobol_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
-
 wait_lines 'workdir/*.csv' 1 adaptiveSobol
-
 cd ..
 
+# Raven-Running-Raven (RAVEN code interface)
+cd RavenRunsRaven/raven_running_raven_internal_models/
+rm -Rf FirstMRun DatabaseStorage *csv testPointSet_dump.xml
+cd ..
+../../../raven_framework test_raven_running_raven_int_models.xml ../pbspro_mpi.xml ../cluster_runinfo.xml
+wait_lines 'raven_running_raven_internal_models/testP*.csv' 17 ravenRunningRaven
+cd ..
 
 if test $num_fails -eq 0; then
     echo ALL PASSED
diff --git a/tests/cluster_tests/test_qsubs.sh b/tests/cluster_tests/test_qsubs.sh
index 2cf29c3a73..4333723d0e 100755
--- a/tests/cluster_tests/test_qsubs.sh
+++ b/tests/cluster_tests/test_qsubs.sh
@@ -51,49 +51,43 @@ echo Removing old databases...
 rm -Rf DatabaseStorage/
 
 rm -Rf FirstMQRun/
-
 #REQUIREMENT_TEST R-IS-7
-python ../../framework/Driver.py test_mpiqsub_local.xml pbspro_mpi.xml cluster_runinfo_legacy.xml
-
+../../raven_framework test_mpiqsub_local.xml pbspro_mpi.xml cluster_runinfo_legacy.xml
 wait_lines 'FirstMQRun/[1-6]/*test.csv' 6 mpiqsub
+echo ''
 
 rm -Rf FirstMNRun/
-
-python ../../framework/Driver.py test_mpiqsub_nosplit.xml cluster_runinfo_legacy.xml
-
+../../raven_framework test_mpiqsub_nosplit.xml cluster_runinfo_legacy.xml
 wait_lines 'FirstMNRun/[1-6]/*.csv' 6 mpiqsub_nosplit
+echo ''
 
 rm -Rf FirstMLRun/
-
-python ../../framework/Driver.py test_mpiqsub_limitnode.xml cluster_runinfo_legacy.xml
-
+../../raven_framework test_mpiqsub_limitnode.xml cluster_runinfo_legacy.xml
 wait_lines 'FirstMLRun/[1-6]/*.csv' 6 mpiqsub_limitnode
+echo ''
 
 rm -Rf FirstMRun/
 echo ''
 echo 'Running interactive MPI test ...'
 qsub -P moose -l select=6:ncpus=4:mpiprocs=1 -l walltime=10:00:00 -l place=free -W block=true ./run_mpi_test.sh
-
 wait_lines 'FirstMRun/[1-6]/*test.csv' 6 mpi
+echo ''
 
 rm -Rf FirstPRun/
-
-python ../../framework/Driver.py test_pbs.xml cluster_runinfo_legacy.xml
-
+../../raven_framework test_pbs.xml cluster_runinfo_legacy.xml
 wait_lines 'FirstPRun/[1-6]/*test.csv' 6 pbsdsh
+echo ''
 
 rm -Rf FirstMFRun/
-
-python ../../framework/Driver.py test_mpiqsub_flex.xml cluster_runinfo_legacy.xml
-
+../../raven_framework test_mpiqsub_flex.xml cluster_runinfo_legacy.xml
 wait_lines 'FirstMFRun/[1-6]/*.csv' 6 mpiqsub_flex
+echo ''
 
 rm -Rf FirstMForcedRun/
-
-python ../../framework/Driver.py test_mpiqsub_forced.xml cluster_runinfo_legacy.xml
-
+../../raven_framework test_mpiqsub_forced.xml cluster_runinfo_legacy.xml
 wait_lines 'FirstMForcedRun/[1-6]/*.csv' 6 mpiqsub_forced
+echo ''
 
 ######################################
 # test parallel for internal Objects #
@@ -101,85 +95,68 @@ wait_lines 'FirstMForcedRun/[1-6]/*.csv' 6 mpiqsub_forced
 # first test (external model in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelExtModel/*.csv
-
 #REQUIREMENT_TEST R-IS-8
-python ../../../framework/Driver.py test_internal_parallel_extModel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_internal_parallel_extModel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'InternalParallelExtModel/*.csv' 28 paralExtModel
-
 cd ..
+echo ''
 
 # second test (ROM in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelScikit/*.csv
-
 #REQUIREMENT_TEST R-IS-9
-python ../../../framework/Driver.py test_internal_parallel_ROM_scikit.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_internal_parallel_ROM_scikit.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'InternalParallelScikit/*.csv' 2 paralROM
-
 cd ..
+echo ''
 
 # third test (PostProcessor in parallel)
 cd InternalParallel/
 rm -Rf InternalParallelPostProcessorLS/*.csv
-
-python ../../../framework/Driver.py test_internal_parallel_PP_LS.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_internal_parallel_PP_LS.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'InternalParallelPostProcessorLS/*.csv' 4 parallelPP
-
 cd ..
+echo ''
 
 # fourth test (Topology Picard in parallel)
-
 cd InternalParallel/
 rm -Rf InternalParallelMSR/*.csv
-
-python ../../../framework/Driver.py test_internal_MSR.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_internal_MSR.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'InternalParallelMSR/*.csv' 1 parallelMSR
-
 cd ..
+echo ''
 
 # fifth test (Ensemble Model Picard in parallel)
 cd InternalParallel/
 rm -Rf metaModelNonLinearParallel/*.png
-
-python ../../../framework/Driver.py test_ensemble_model_picard_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_ensemble_model_picard_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'metaModelNonLinearParallel/*.png' 3 parallelEnsemblePicard
-
 cd ..
+echo ''
 
 # sixth test (Ensemble Model Linear Picard in parallel)
 cd InternalParallel/
 rm -Rf metaModelLinearParallel/*.png
-
-python ../../../framework/Driver.py test_ensemble_model_linear_internal_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_ensemble_model_linear_internal_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'metaModelLinearParallel/*.png' 2 parallelEnsembleLinear
-
 cd ..
+echo ''
 
 # seventh test (HybridModel Code in parallel)
 cd InternalParallel/
 rm -Rf hybridModelCode/*.csv
-
-python ../../../framework/Driver.py test_hybrid_model_code.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_hybrid_model_code.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'hybridModelCode/*.csv' 1 parallelHybridModelCode
-
 cd ..
+echo ''
 
 # eighth test (HybridModel External Model in parallel)
 cd InternalParallel/
 rm -Rf hybridModelExternal/*.csv
-
-python ../../../framework/Driver.py test_hybrid_model_external.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_hybrid_model_external.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'hybridModelExternal/*.csv' 1 parallelHybridModelExternal
-
 cd ..
+echo ''
 
 ############################################
 # test parallel for internal Objects ENDED #
@@ -189,14 +166,22 @@ cd ..
 # other parallel objects tests #
 ################################
 
+# Adaptive Sobol
 cd AdaptiveSobol/
 rm -Rf workdir/*
-
-python ../../../framework/Driver.py test_adapt_sobol_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
-
+../../../raven_framework test_adapt_sobol_parallel.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
 wait_lines 'workdir/*.csv' 1 adaptiveSobol
+cd ..
+echo ''
 
+# Raven-Running-Raven (RAVEN code interface)
+cd RavenRunsRaven/raven_running_raven_internal_models/
+rm -Rf FirstMRun DatabaseStorage *csv testPointSet_dump.xml
+cd ..
+../../../raven_framework test_raven_running_raven_int_models.xml ../pbspro_mpi.xml ../cluster_runinfo_legacy.xml
+wait_lines 'raven_running_raven_internal_models/testP*.csv' 17 ravenRunningRaven
 cd ..
+echo ''
 
 if test $num_fails -eq 0; then
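For orientation, a rough Python sketch of the check the wait_lines shell helper (defined earlier in these scripts and only partially visible in the hunks above) performs for the new RAVEN-runs-RAVEN test; the polling loop, timeout, and signature here are assumptions, not the helper's actual implementation:

    import glob
    import time

    def wait_lines(pattern, expected, name, timeout=600):
        """Poll until `pattern` matches at least `expected` files; report `name` on failure."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if len(glob.glob(pattern)) >= expected:
                print(name, 'PASSED')
                return True
            time.sleep(10)
        print(name, 'FAILED')
        return False

    # the new cluster test expects 17 CSVs out of the RAVEN-runs-RAVEN workflow
    wait_lines('raven_running_raven_internal_models/testP*.csv', 17, 'ravenRunningRaven')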