Commit d3563cd

Merge remote-tracking branch 'origin/devel' into alfoa/mandd/Jimmy-GA-ConstraintHandling

mandd committed Jul 7, 2021
2 parents 0a65d50 + 5377394 commit d3563cd
Showing 23 changed files with 698 additions and 345 deletions.
2 changes: 1 addition & 1 deletion dependencies.xml
@@ -40,7 +40,7 @@ Note all install methods after "main" take
<matplotlib>3.2</matplotlib>
<statsmodels/>
<cloudpickle>1.6</cloudpickle>
<tensorflow>1.15</tensorflow>
<tensorflow>2.0</tensorflow>
<python skip_check='True'>3</python>
<hdf5 skip_check='True'/>
<swig skip_check='True'/>
1 change: 1 addition & 0 deletions developer_tools/XSDSchemas/raven.xsd
@@ -95,6 +95,7 @@
</xsd:complexType>
</xsd:element>
<xsd:element name="MPIExec" type="xsd:string" minOccurs="0" default="mpiexec"/>
<xsd:element name="threadParameter" type="xsd:string" minOccurs="0" default="--n-threads=%NUM_CPUS%"/>
<xsd:element name="NodeParameter" type="xsd:string" minOccurs="0" default="-f"/>
<xsd:element name="NumMPI" type="xsd:integer" minOccurs="0" default="1"/>
<xsd:element name="totalNumCoresUsed" type="xsd:integer" minOccurs="0" default="1"/>
3 changes: 1 addition & 2 deletions doc/user_guide/ravenRomTrainer.tex
@@ -178,8 +178,7 @@ \subsubsection{How to load and sample a ROM?}
\textbf{Files} object to track the pickled ROM file.
\xmlExample{framework/user_guide/ravenTutorial/RomLoad.xml}{Files}

In this example, the subtype \xmlString{NDinvDistWeight} of \xmlNode{ROM} is used instead of \xmlString{pickledROM},
since the subtype of ROM is already known.
In this example, the subtype \xmlString{pickledROM} of \xmlNode{ROM} is used, since the hyper-parameters of a ROM cannot be changed once it is loaded from a pickled (serialized) file.
\xmlExample{framework/user_guide/ravenTutorial/RomLoad.xml}{Models}

Two data objects are defined: 1) a \textbf{HistorySet} named ``inputPlaceHolder'' used as a placeholder input for
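As background on why the hyper-parameters are frozen: pickling serializes the trained object wholesale, so reloading restores it exactly as it was saved. A minimal sketch with a toy class (not RAVEN's actual ROM API):

```python
import pickle

class ToyRom:
  """Stand-in for a trained ROM; hyper-parameters are fixed at construction."""
  def __init__(self, nNeighbors):
    self.nNeighbors = nNeighbors
    self.trained = False
  def train(self, data):
    self.trained = True

rom = ToyRom(nNeighbors=5)
rom.train(data=[1, 2, 3])

blob = pickle.dumps(rom)      # serialize the trained ROM
loaded = pickle.loads(blob)   # reload: hyper-parameters and state come back as saved
print(loaded.nNeighbors, loaded.trained)  # 5 True
```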
16 changes: 15 additions & 1 deletion doc/user_manual/runInfo.tex
@@ -59,6 +59,19 @@ \subsection{RunInfo: Input of Calculation Flow}
%
\default{mpiexec}

%%%%%% N THREADS
\item \xmlNode{threadParameter}, \xmlDesc{string, optional field}, specifies the command used to set the
number of threads. The ``\%NUM\_CPUS\%'' string is a wildcard that will be replaced by the number of threads
specified in the node \xmlNode{NumThreads}. For commands that require the number of threads to be
provided without a blank space, the wildcard can be attached directly to the command string, as in
\texttt{--my-nthreads=\%NUM\_CPUS\%} (e.g. \texttt{--my-nthreads=10}). Otherwise, the blank space can be
added explicitly, as in \texttt{-omp \%NUM\_CPUS\%} (e.g. \texttt{-omp 10}).
If the wildcard is not present, a blank space is always added after the command
(e.g. \texttt{--mycommand} becomes \texttt{--mycommand 10}).
%
\default{--n-threads=\%NUM\_CPUS\%}
%%%%%% BATCH SIZE
\item \xmlNode{batchSize}, \xmlDesc{integer, optional field},
specifies the number of parallel runs executed simultaneously (e.g.,
@@ -125,7 +138,8 @@ \subsection{RunInfo: Input of Calculation Flow}
For example, if RAVEN is driving a code named ``FOO,'' and this code has
multi-threading support, this block is used to specify how many threads each
instance of FOO should use (e.g. ``\texttt{FOO --n-threads=N}'' where \texttt{N}
is the number of threads). The command to specify the number of threads can be
customized via the node \xmlNode{threadParameter}.
%
\default{1 (or None when the driven code does not have multi-threading
support)}
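The substitution contract documented above can be sketched in a few lines of Python. This is an illustration of the documented behavior only; the helper name `applyThreadParameter` is hypothetical, not RAVEN's internal code:

```python
def applyThreadParameter(threadParameter, numThreads):
  """Replace the %NUM_CPUS% wildcard, or append the count after a blank space."""
  if '%NUM_CPUS%' in threadParameter:
    return threadParameter.replace('%NUM_CPUS%', str(numThreads))
  return '{} {}'.format(threadParameter, numThreads)

# Wildcard attached to the flag (no blank space wanted):
print(applyThreadParameter('--my-nthreads=%NUM_CPUS%', 10))  # --my-nthreads=10
# Wildcard separated by a blank space:
print(applyThreadParameter('-omp %NUM_CPUS%', 10))           # -omp 10
# No wildcard: a blank space and the count are appended:
print(applyThreadParameter('--mycommand', 10))               # --mycommand 10
```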
3 changes: 2 additions & 1 deletion framework/CustomModes/MPILegacySimulationMode.py
@@ -201,7 +201,8 @@ def modifyInfo(self, runInfoDict):
    newRunInfo['precommand'] = runInfoDict["MPIExec"]+" "+nodeCommand+" -n "+str(numMPI)+" "+runInfoDict['precommand']
    if(runInfoDict['NumThreads'] > 1):
      #add number of threads to the post command.
      newRunInfo['postcommand'] = " --n-threads=%NUM_CPUS% "+runInfoDict['postcommand']
      newRunInfo['threadParameter'] = runInfoDict['threadParameter']
      newRunInfo['postcommand'] = " {} {}".format(newRunInfo['threadParameter'], runInfoDict['postcommand'])
    self.raiseAMessage("precommand: "+newRunInfo['precommand']+", postcommand: "+newRunInfo.get('postcommand',runInfoDict['postcommand']))
    return newRunInfo

4 changes: 2 additions & 2 deletions framework/CustomModes/MPISimulationMode.py
@@ -109,9 +109,9 @@ def modifyInfo(self, runInfoDict):
    # Note, with defaults the precommand is "mpiexec -f nodeFile -n numMPI"
    newRunInfo['precommand'] = runInfoDict["MPIExec"]+" "+nodeCommand+" -n "+str(numMPI)+" "+runInfoDict['precommand']
    if runInfoDict['NumThreads'] > 1:
      newRunInfo['threadParameter'] = runInfoDict['threadParameter']
      #add number of threads to the post command.
      newRunInfo['postcommand'] = " --n-threads=%NUM_CPUS% "+runInfoDict['postcommand']
      newRunInfo['postcommand'] = " {} {}".format(newRunInfo['threadParameter'], runInfoDict['postcommand'])
    self.raiseAMessage("precommand: "+newRunInfo['precommand']+", postcommand: "+newRunInfo.get('postcommand',runInfoDict['postcommand']))
    return newRunInfo

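Both simulation modes now read the thread flag from `runInfoDict['threadParameter']` instead of hard-coding `--n-threads=%NUM_CPUS%`. A self-contained sketch of the resulting command composition (the dictionary values are invented for illustration; the wildcard is presumably substituted later in the pipeline):

```python
runInfoDict = {
  'MPIExec': 'mpiexec',
  'NumThreads': 4,
  'threadParameter': '--n-threads=%NUM_CPUS%',  # now user-configurable
  'precommand': '',
  'postcommand': '',
}
numMPI = 2
nodeCommand = '-f nodeFile'

newRunInfo = {}
# With these values the precommand becomes "mpiexec -f nodeFile -n 2 "
newRunInfo['precommand'] = runInfoDict['MPIExec']+" "+nodeCommand+" -n "+str(numMPI)+" "+runInfoDict['precommand']
if runInfoDict['NumThreads'] > 1:
  # postcommand carries the configurable thread parameter
  newRunInfo['threadParameter'] = runInfoDict['threadParameter']
  newRunInfo['postcommand'] = " {} {}".format(newRunInfo['threadParameter'], runInfoDict['postcommand'])

print(newRunInfo['precommand'] + "<code>" + newRunInfo['postcommand'])
# mpiexec -f nodeFile -n 2 <code> --n-threads=%NUM_CPUS%
```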
66 changes: 46 additions & 20 deletions framework/Optimizers/parentSelectors/parentSelectors.py
@@ -91,29 +91,55 @@ def tournamentSelection(population,**kwargs):
  fitness = kwargs['fitness']
  nParents = kwargs['nParents']
  pop = population

  popSize = population.values.shape[0]

  if 'rank' in kwargs:
    # the key rank is used in multi-objective optimization where rank identifies which front the point belongs to
    rank = kwargs['rank']
    multiObjectiveRanking = True
    matrixOperationRaw = np.zeros((popSize,3))
    matrixOperationRaw[:,0] = np.transpose(np.arange(popSize))
    matrixOperationRaw[:,1] = np.transpose(fitness.data)
    matrixOperationRaw[:,2] = np.transpose(rank.data)
    matrixOperation = np.zeros((popSize,3))
  else:
    multiObjectiveRanking = False
    matrixOperationRaw = np.zeros((popSize,2))
    matrixOperationRaw[:,0] = np.transpose(np.arange(popSize))
    matrixOperationRaw[:,1] = np.transpose(fitness.data)
    matrixOperation = np.zeros((popSize,2))

  indexes = list(np.arange(popSize))
  indexesShuffled = randomUtils.randomChoice(indexes, size = popSize, replace = False, engine = None)

  for idx, val in enumerate(indexesShuffled):
    matrixOperation[idx,:] = matrixOperationRaw[val,:]

  selectedParent = xr.DataArray(
    np.zeros((nParents,np.shape(pop)[1])),
    dims=['chromosome','Gene'],
    coords={'chromosome':np.arange(nParents),
            'Gene': kwargs['variables']})

  if nParents >= popSize/2.0:
    # generate combination of 2 without replacement
    selectionList = np.atleast_2d(randomUtils.randomChoice(list(range(0,popSize)), 2*nParents, replace=False))
  else: # nParents < popSize/2.0
    # generate combination of 2 with replacement
    selectionList = np.atleast_2d(randomUtils.randomChoice(list(range(0,popSize)), 2*nParents))

  selectionList = selectionList.reshape(nParents,2)

  for index,pair in enumerate(selectionList):
    if fitness[pair[0]]>fitness[pair[1]]:
      selectedParent[index,:] = pop.values[pair[0],:]
    else: # fitness[pair[1]]>=fitness[pair[0]]
      selectedParent[index,:] = pop.values[pair[1],:]

  if not multiObjectiveRanking: # single-objective implementation of tournamentSelection
    for i in range(nParents):
      # compare the shuffled pair (2*i, 2*i+1) by fitness and keep the winner's original index
      if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]:
        index = int(matrixOperation[2*i,0])
      else:
        index = int(matrixOperation[2*i+1,0])
      selectedParent[i,:] = pop.values[index,:]
  else: # multi-objective implementation of tournamentSelection
    for i in range(nParents):
      if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]:
        index = int(matrixOperation[2*i,0])
      elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]:
        index = int(matrixOperation[2*i+1,0])
      else: # same rank case: fall back to fitness
        if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]:
          index = int(matrixOperation[2*i,0])
        else:
          index = int(matrixOperation[2*i+1,0])
      selectedParent[i,:] = pop.values[index,:]

  return selectedParent

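For reference, the shuffled-pairs tournament implemented above reduces to the following minimal, self-contained sketch for the single-objective case (plain NumPy instead of RAVEN's xarray populations and randomUtils; names are illustrative):

```python
import numpy as np

def tournamentSelect(population, fitness, nParents, rng):
  """Pair shuffled individuals and keep the fitter member of each pair."""
  popSize = population.shape[0]
  shuffled = rng.permutation(popSize)
  selected = np.zeros((nParents, population.shape[1]))
  for i in range(nParents):
    # modulo wrap is a simplification for nParents > popSize/2;
    # the RAVEN version shuffles once and walks the pairs directly
    a = shuffled[(2*i) % popSize]
    b = shuffled[(2*i + 1) % popSize]
    winner = a if fitness[a] > fitness[b] else b
    selected[i, :] = population[winner, :]
  return selected

rng = np.random.default_rng(42)
pop = rng.random((8, 3))   # 8 chromosomes, 3 genes each
fit = pop.sum(axis=1)      # toy fitness: sum of the genes
print(tournamentSelect(pop, fit, nParents=4, rng=rng).shape)  # (4, 3)
```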
59 changes: 34 additions & 25 deletions framework/Simulation.py
@@ -215,34 +215,41 @@ def __init__(self, frameworkDir, verbosity='all', interactive=Interaction.No):
    sys.path.append(os.getcwd())
    #this dictionary contains the general info to run the simulation
    self.runInfoDict = {}
    self.runInfoDict['DefaultInputFile' ] = 'test.xml' # default input file to use
    self.runInfoDict['SimulationFiles' ] = [] # the xml input file
    self.runInfoDict['ScriptDir' ] = os.path.join(os.path.dirname(frameworkDir),"scripts") # the location of the pbs script interfaces
    self.runInfoDict['FrameworkDir' ] = frameworkDir # the directory where the framework is located
    self.runInfoDict['RemoteRunCommand' ] = os.path.join(frameworkDir,'raven_qsub_command.sh')
    self.runInfoDict['NodeParameter' ] = '-f' # the parameter used to specify the files where the nodes are listed
    self.runInfoDict['MPIExec' ] = 'mpiexec' # the command used to run mpi commands
    self.runInfoDict['threadParameter'] = '--n-threads=%NUM_CPUS%' # the command used to run multi-threading commands.
    # The "%NUM_CPUS%" string is a wildcard to replace. In this way, commands
    # that require the number of threads without a blank space can be given
    # as --my-nthreads=%NUM_CPUS% (e.g. --my-nthreads=10), while others can
    # be given as -omp %NUM_CPUS% (e.g. -omp 10). If the wildcard is not
    # present, a blank space is always added (e.g. --mycommand => --mycommand 10)
    self.runInfoDict['WorkingDir' ] = '' # the directory where the framework should be running
    self.runInfoDict['TempWorkingDir' ] = '' # the temporary directory where a simulation step is run
    self.runInfoDict['NumMPI' ] = 1 # the number of MPI processes per run
    self.runInfoDict['NumThreads' ] = 1 # number of threads per run
    self.runInfoDict['numProcByRun' ] = 1 # total number of cores used by one run (number of threads times number of mpi)
    self.runInfoDict['batchSize' ] = 1 # number of contemporaneous runs
    self.runInfoDict['internalParallel' ] = False # activate internal parallel (parallel python). If True parallel python is used, otherwise multi-threading is used
    self.runInfoDict['ParallelCommand' ] = '' # the command that should be used to submit jobs in parallel (mpi)
    self.runInfoDict['ThreadingCommand' ] = '' # the command that should be used to submit multi-threaded jobs
    self.runInfoDict['totalNumCoresUsed' ] = 1 # total number of cores used by driver
    self.runInfoDict['queueingSoftware' ] = '' # queueing software name
    self.runInfoDict['stepName' ] = '' # the name of the step currently running
    self.runInfoDict['precommand' ] = '' # added to the front of the command that is run
    self.runInfoDict['postcommand' ] = '' # added after the command that is run
    self.runInfoDict['delSucLogFiles' ] = False # if a simulation (code run) has not failed, delete the relative log file (if True)
    self.runInfoDict['deleteOutExtension'] = [] # if a simulation (code run) has not failed, delete the relative output files with the listed extension (comma separated list, for example: 'e,r,txt')
    self.runInfoDict['mode' ] = '' # running mode. Currently the only mode supported is mpi, but others can be added with custom modes.
    self.runInfoDict['Nodes' ] = [] # list of node IDs. Filled only in case RAVEN is run on a DMP machine
    self.runInfoDict['expectedTime' ] = '10:00:00' # how long the complete input is expected to run
    self.runInfoDict['logfileBuffer' ] = int(io.DEFAULT_BUFFER_SIZE)*50 # logfile buffer size in bytes
    self.runInfoDict['clusterParameters' ] = [] # extra parameters to use with the qsub command
    self.runInfoDict['maxQueueSize' ] = None

    #Following is a set of dictionaries that, in a manner consistent with their names, collect the instances of all objects needed in the simulation
@@ -586,6 +593,8 @@ def __readRunInfo(self,xmlNode,runInfoSkip,xmlFilename):
        self.runInfoDict['NodeParameter'] = element.text.strip()
      elif element.tag == 'MPIExec':
        self.runInfoDict['MPIExec'] = element.text.strip()
      elif element.tag == 'threadParameter':
        self.runInfoDict['threadParameter'] = element.text.strip()
      elif element.tag == 'JobName':
        self.runInfoDict['JobName' ] = element.text.strip()
      elif element.tag == 'ParallelCommand':
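The new branch mirrors its siblings: it simply stores the stripped element text under `threadParameter`. A minimal sketch of parsing such a `<RunInfo>` block (the XML snippet and the reduced parser are illustrative, not RAVEN's full reader):

```python
import xml.etree.ElementTree as ET

runInfoXml = """
<RunInfo>
  <MPIExec>mpiexec</MPIExec>
  <threadParameter>--my-nthreads=%NUM_CPUS%</threadParameter>
  <NumThreads>10</NumThreads>
</RunInfo>
"""

runInfoDict = {'threadParameter': '--n-threads=%NUM_CPUS%'}  # default, overridden below
for element in ET.fromstring(runInfoXml):
  if element.tag == 'MPIExec':
    runInfoDict['MPIExec'] = element.text.strip()
  elif element.tag == 'threadParameter':
    runInfoDict['threadParameter'] = element.text.strip()
  elif element.tag == 'NumThreads':
    runInfoDict['NumThreads'] = int(element.text)

print(runInfoDict['threadParameter'])  # --my-nthreads=%NUM_CPUS%
```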