diff --git a/.pylint b/.pylint new file mode 100644 index 00000000..3775483e --- /dev/null +++ b/.pylint @@ -0,0 +1,280 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Profiled execution. +profile=no + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + + +[MESSAGES CONTROL] + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=C0301,C0103,C0325 + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (RP0004). +comment=no + +# Template used to display messages. This is a python new-style format string +# used to format the message information. 
See doc for all details +#msg-template= + + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,filter,apply,input + +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct attribute names in class +# bodies +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=__.*__ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Regexp for a line that is allowed to be longer than the limit (a URL, possibly inside a comment). +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +ignored-classes=SQLObject + +# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed.
Python regular +# expressions are accepted. +generated-members=REQUEST,acl_users,aq_parent + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the beginning of the name of dummy variables +# (i.e. not used). +dummy-variables-rgx=_$|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/CMakeLists.txt b/CMakeLists.txt index cce49c5e..f251d499 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,20 +6,15 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.4.0) # Setup and include CMake modules. SET(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake_modules) +# We activate compiler dependent flags INCLUDE(CheckCXXCompilerFlag) - -# Initialise (empty) list of libraries to link against. -SET(MANDATORY_LIBRARIES "") - -# Look for the math library and if found set it as mandatory -FIND_LIBRARY(SYSTEM_M_LIBRARY NAMES m) -IF(SYSTEM_M_LIBRARY) - SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${SYSTEM_M_LIBRARY}) - MESSAGE(STATUS "m library found: ${SYSTEM_M_LIBRARY}") -ENDIF(SYSTEM_M_LIBRARY) - -# Configuration for GCC. 
-IF(CMAKE_COMPILER_IS_GNUCXX) +if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + CHECK_CXX_COMPILER_FLAG(-ftemplate-depth=256 CLANG_TEMPLATE_DEPTH) + IF(CLANG_TEMPLATE_DEPTH) + MESSAGE(STATUS "Enabling '-ftemplate-depth=256' compiler flag, required since Boost 1.54.") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ftemplate-depth=256") + ENDIF(CLANG_TEMPLATE_DEPTH) +elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") CHECK_CXX_COMPILER_FLAG(-fmessage-length=0 GNUCXX_MESSAGE_LENGTH) IF(GNUCXX_MESSAGE_LENGTH) MESSAGE(STATUS "Enabling '-fmessage-length=0' compiler flag.") @@ -41,10 +36,43 @@ IF(CMAKE_COMPILER_IS_GNUCXX) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-ignored-qualifiers") ENDIF(GNUCXX_IGNORED_QUALIFIERS) # Add to the base flags extra warnings. Also, additional flags to turn off some GCC warnings that in practice clutter the compilation output. - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing -Wall -Wextra -Wdisabled-optimization") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing -Wall -Wextra -Wnoexcept -Wdisabled-optimization") # Suggested for multithreaded code. ADD_DEFINITIONS(-D_REENTRANT) -ENDIF(CMAKE_COMPILER_IS_GNUCXX) + +elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") + # using Intel C++ + CHECK_CXX_COMPILER_FLAG(-mieee-fp INTEL_IEEE_COMPLIANT) + IF(INTEL_IEEE_COMPLIANT) + MESSAGE(STATUS "Enabling '-mieee-fp' compiler flag to get IEEE compliant code") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mieee-fp") + ENDIF(INTEL_IEEE_COMPLIANT) +elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") + # using Visual Studio C++ +endif() + +# Prefer the standard '-std=c++11' flag; fall back to the older '-std=c++0x' spelling only when the former is unsupported, so a single standard flag ends up in CMAKE_CXX_FLAGS. +CHECK_CXX_COMPILER_FLAG(-std=c++11 ALL_C11) +IF(ALL_C11) + MESSAGE(STATUS "Enabling '-std=c++11' compiler flag") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +ELSE(ALL_C11) + CHECK_CXX_COMPILER_FLAG(-std=c++0x ALL_C0X) + IF(ALL_C0X) + MESSAGE(STATUS "Enabling '-std=c++0x' compiler flag") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") + ENDIF(ALL_C0X) +ENDIF(ALL_C11) + + +# Initialise (empty) list of libraries to link against. +SET(MANDATORY_LIBRARIES "") + +# Look for the math library and if found set it as mandatory +FIND_LIBRARY(SYSTEM_M_LIBRARY NAMES m) +IF(SYSTEM_M_LIBRARY) + SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${SYSTEM_M_LIBRARY}) + MESSAGE(STATUS "m library found: ${SYSTEM_M_LIBRARY}") +ENDIF(SYSTEM_M_LIBRARY) # Set default build type to "Release", change it in the GUI if you need to build with debug. IF(NOT CMAKE_BUILD_TYPE) @@ -55,7 +83,7 @@ ENDIF(NOT CMAKE_BUILD_TYPE) # Use CMake's 2.6 new policy for library paths. IF(COMMAND CMAKE_POLICY) - CMAKE_POLICY(SET CMP0003 NEW) + CMAKE_POLICY(SET CMP0003 NEW) ENDIF(COMMAND CMAKE_POLICY) # Provides build options to CMake. @@ -69,10 +97,24 @@ OPTION(BUILD_MAIN "Build 'main.cpp'." ON) # Build Option: when active the bindings to Python are compiled and linked (installation). OPTION(BUILD_PYGMO "Build Python bindings." OFF) +IF(BUILD_PYGMO) + # Detect default Python version + IF(NOT DEFINED PYGMO_PYTHON_VERSION) + INCLUDE(FindPythonInterp) + IF(NOT PYTHONINTERP_FOUND) + MESSAGE(FATAL_ERROR "Unable to locate Python interpreter. Please turn off BUILD_PYGMO.") + ELSE(NOT PYTHONINTERP_FOUND) + MESSAGE(STATUS "Python version ${PYTHON_VERSION_STRING} found.") + # Build Option: build with a specific Python compatibility + SET(PYGMO_PYTHON_VERSION ${PYTHON_VERSION_STRING} CACHE STRING "Build PyGMO with specific Python compatibility.") + ENDIF(NOT PYTHONINTERP_FOUND) + ENDIF(NOT DEFINED PYGMO_PYTHON_VERSION) +ENDIF(BUILD_PYGMO) + # Build Option: when active the GTOP database problems are built. OPTION(ENABLE_GTOP_DATABASE "Build GTOP database problems (interplanetary transfers)." OFF) IF(ENABLE_GTOP_DATABASE) - ADD_DEFINITIONS(-DPAGMO_ENABLE_KEP_TOOLBOX) + ADD_DEFINITIONS(-DPAGMO_ENABLE_KEP_TOOLBOX) ENDIF(ENABLE_GTOP_DATABASE) # Build Option: minimisers from the GNU scientific library (GSL). OPTION(ENABLE_GSL "Enable support for GSL minimisers (requires GSL >= 1.13)." OFF) @@ -104,14 +146,19 @@ SET(DYNAMIC_LIB_PAGMO_BUILD_FLAGS "-DBOOST_SERIALIZATION_DYN_LINK=1") # Setting the boost libraries needed for PaGMO SET(REQUIRED_BOOST_LIBS system serialization thread) IF(BUILD_PYGMO) - SET(REQUIRED_BOOST_LIBS ${REQUIRED_BOOST_LIBS} python) + INCLUDE(PaGMOPythonSetup) + IF(PYGMO_PYTHON_VERSION LESS 3) + SET(REQUIRED_BOOST_LIBS ${REQUIRED_BOOST_LIBS} python) + ELSE(PYGMO_PYTHON_VERSION LESS 3) + SET(REQUIRED_BOOST_LIBS ${REQUIRED_BOOST_LIBS} python3) + ENDIF(PYGMO_PYTHON_VERSION LESS 3) ENDIF(BUILD_PYGMO) IF(ENABLE_GTOP_DATABASE) SET(REQUIRED_BOOST_LIBS ${REQUIRED_BOOST_LIBS} date_time) ENDIF(ENABLE_GTOP_DATABASE) MESSAGE(STATUS "Required Boost libraries: ${REQUIRED_BOOST_LIBS}") -FIND_PACKAGE(Boost 1.42.0 REQUIRED COMPONENTS "${REQUIRED_BOOST_LIBS}") +FIND_PACKAGE(Boost 1.48.0 REQUIRED COMPONENTS "${REQUIRED_BOOST_LIBS}") MESSAGE(STATUS "Detected Boost version: ${Boost_VERSION}") # Include system Boost headers. MESSAGE(STATUS "Boost include dirs: ${Boost_INCLUDE_DIRS}") @@ -151,12 +198,12 @@ ELSE(UNIX) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mthreads") ENDIF(MINGW) IF(MSVC) - #This flags are necessary for MSVC requires boost libs to be named in a stupid way. (i.e. without lib in front) - ADD_DEFINITIONS(-DBOOST_ALL_DYN_LINK) + #These flags are necessary because MSVC requires boost libs to be named without the leading "lib". + ADD_DEFINITIONS(-DBOOST_ALL_DYN_LINK) #This flag is necesary for MSVC to access mathematical constants such as M_PI,... ADD_DEFINITIONS(-D_USE_MATH_DEFINES) - #This is necessary to add Additional Library Directories in the linker path of MSVC - link_directories(${Boost_LIBRARY_DIRS}) + #This is necessary to add Additional Library Directories in the linker path of MSVC + link_directories(${Boost_LIBRARY_DIRS}) ENDIF(MSVC) ENDIF(UNIX) @@ -273,7 +320,7 @@ IF(ENABLE_SNOPT) ENDIF(NOT SNOPT_F2C_LIBRARY) MESSAGE(STATUS "f2c library: ${SNOPT_F2C_LIBRARY}") - SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${SNOPT_SNOPT_LIBRARY} ${SNOPT_SNPRINT_LIBRARY} ${SNOPT_F2C_LIBRARY}) + SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${SNOPT_SNOPT_LIBRARY} ${SNOPT_SNPRINT_LIBRARY} ${SNOPT_F2C_LIBRARY} -lgfortran) ADD_DEFINITIONS(-DPAGMO_ENABLE_SNOPT) ENDIF(ENABLE_SNOPT) @@ -304,31 +351,34 @@ IF(ENABLE_IPOPT) ENDIF(NOT DL_LIBRARY) FIND_LIBRARY(COINHSL_LIBRARY NAMES coinhsl) - IF(COINHSL_LIBRARY) - MESSAGE(STATUS "COINHSL Library Found ... 
linking it in") - MESSAGE(STATUS "coin hsl library: ${COINHSL_LIBRARY}") - SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${COINHSL_LIBRARY}) - ENDIF(COINHSL_LIBRARY) FIND_LIBRARY(COINMETIS_LIBRARY NAMES coinmetis) + IF(COINMETIS_LIBRARY) MESSAGE(STATUS "COINMETIS Library Found ... linking it in") MESSAGE(STATUS "coin metis library: ${COINMETIS_LIBRARY}") - SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${COINMETIS_LIBRARY}) + SET(MANDATORY_LIBRARIES ${COINMETIS_LIBRARY} ${MANDATORY_LIBRARIES}) ENDIF(COINMETIS_LIBRARY) + FIND_LIBRARY(COINHSL_LIBRARY NAMES coinhsl) + IF(COINHSL_LIBRARY) + MESSAGE(STATUS "COINHSL Library Found ... linking it in") + MESSAGE(STATUS "coin hsl library: ${COINHSL_LIBRARY}") + SET(MANDATORY_LIBRARIES ${COINHSL_LIBRARY} ${MANDATORY_LIBRARIES}) + ENDIF(COINHSL_LIBRARY) + FIND_LIBRARY(COINMUMPS_LIBRARY NAMES coinmumps) IF(COINMUMPS_LIBRARY) MESSAGE(STATUS "COINMUMPS Library Found ... linking it in") MESSAGE(STATUS "coin mumps library: ${COINMUMPS_LIBRARY}") - SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${COINMUMPS_LIBRARY}) + SET(MANDATORY_LIBRARIES ${COINMUMPS_LIBRARY} ${MANDATORY_LIBRARIES}) ENDIF(COINMUMPS_LIBRARY) IF(NOT COINHSL_LIBRARY AND NOT COINMUMPS_LIBRARY) MESSAGE(STATUS "Linear Solver for IPOPT could not be detected. I tried with HSL and MUMPS. You need to link manually the library if necessary") ENDIF(NOT COINHSL_LIBRARY AND NOT COINMUMPS_LIBRARY) - SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${IPOPT_LIBRARY} ${LAPACK_LIBRARIES} ${DL_LIBRARY}) - + SET(MANDATORY_LIBRARIES ${IPOPT_LIBRARY} ${MANDATORY_LIBRARIES} ${LAPACK_LIBRARIES} ${DL_LIBRARY}) + ELSE(UNIX) IF(NOT WIN32) MESSAGE(FATAL_ERROR "Sorry, at the moment pagmo can be built only on Unix and Win32 environments.") @@ -343,7 +393,7 @@ IF(ENABLE_IPOPT) SET(MANDATORY_LIBRARIES ${MANDATORY_LIBRARIES} ${IPOPT_LIBRARY} ${COIN_MUMPS_LIBRARY} ${COIN_BLAS_LIBRARY} ${COIN_LAPACK_LIBRARY} -lgfortran -lpthread) ENDIF(UNIX) - + ADD_DEFINITIONS(-DPAGMO_ENABLE_IPOPT) INCLUDE_DIRECTORIES("${IPOPT_INCLUDE_DIR}") SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS_INIT} ${LAPACK_LINKER_FLAGS}") @@ -356,18 +406,22 @@ ENDIF(ENABLE_IPOPT) # Add the directory for the PaGMO library. ADD_SUBDIRECTORY("${CMAKE_SOURCE_DIR}/src") +# Add the directory for the PyGMO library. IF(BUILD_PYGMO) - INCLUDE(PaGMOPythonSetup) ADD_SUBDIRECTORY("${CMAKE_SOURCE_DIR}/PyGMO") ENDIF(BUILD_PYGMO) # From now on all targets will use the static PaGMO library. Add the relevant flags. SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STATIC_LIB_PAGMO_USE_FLAGS}") +MESSAGE(STATUS "Build flags: " "${CMAKE_CXX_FLAGS}") +MESSAGE(STATUS "Module linker flags: " "${CMAKE_MODULE_LINKER_FLAGS}") +MESSAGE(STATUS "Shared linker flags: " "${CMAKE_SHARED_LINKER_FLAGS}") + # Link main to pagmo_static library. IF(BUILD_MAIN) ADD_EXECUTABLE(main main.cpp) - TARGET_LINK_LIBRARIES(main pagmo_static ${MANDATORY_LIBRARIES}) + TARGET_LINK_LIBRARIES(main ${MANDATORY_LIBRARIES} pagmo_static) ENDIF(BUILD_MAIN) IF(ENABLE_TESTS) diff --git a/PyGMO/__init__.py b/PyGMO/__init__.py index f0e92597..a2bd099d 100644 --- a/PyGMO/__init__.py +++ b/PyGMO/__init__.py @@ -21,219 +21,317 @@ # Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -import core, algorithm, migration, problem, topology, test, util -from util import hypervolume, hv_algorithm +from PyGMO import core, algorithm, migration, problem, topology, test, util +from PyGMO.util import hypervolume, hv_algorithm __doc__ = 'PyGMO is a pretty cool guy. 
it kills aliens and doesnt afraid of anything...' -__all__ = ['core', 'algorithm', 'migration', 'problem', 'topology', 'test', 'util'] +__all__ = ['core', 'algorithm', 'migration', + 'problem', 'topology', 'test', 'util'] __version__ = '1.1.5' -# For convenience, bring all core classes into the root namespace when importing *. -from core import * -__all__ += filter(lambda name: not name.startswith('_'),dir(core)) +# For convenience, bring all core classes into the root namespace when +# importing *. +from PyGMO.core import * +__all__ += [name for name in dir(core) if not name.startswith('_')] problem_list = problem._get_problem_list() algorithm_list = algorithm._get_algorithm_list() island_list = core._get_island_list() # Fill up the __extensions__ variable with all detected extensions -__extensions__ = {'nlopt': False, 'gsl': False,'snopt': False,'ipopt': False,'gtop': False,'scipy': False,'networkx': False,'vpython': False, 'pykep': False} +__extensions__ = { + 'nlopt': False, + 'gsl': False, + 'snopt': False, + 'ipopt': False, + 'gtop': False, + 'scipy': False, + 'networkx': False, + 'vpython': False, + 'pykep': False} if "nlopt" in str(algorithm._get_algorithm_list()): - __extensions__['nlopt']=True + __extensions__['nlopt'] = True if "gsl" in str(algorithm._get_algorithm_list()): - __extensions__['gsl']=True + __extensions__['gsl'] = True if "snopt" in str(algorithm._get_algorithm_list()): - __extensions__['snopt']=True + __extensions__['snopt'] = True if "ipopt" in str(algorithm._get_algorithm_list()): - __extensions__['ipopt']=True + __extensions__['ipopt'] = True if "cassini" in str(problem._get_problem_list()): - __extensions__['gtop']=True + __extensions__['gtop'] = True try: - from scipy import __version__ as __scipy_version__ - __extensions__['scipy']=True + from scipy import __version__ as __scipy_version__ + __extensions__['scipy'] = True except ImportError: - pass + pass try: - from networkx.version import version_info as __networkx_version__ - __extensions__['networkx']=True + from networkx.version import version_info as __networkx_version__ + __extensions__['networkx'] = True except ImportError: - pass + pass try: - from visual import version as __visual_version__ - __extensions__['vpython']=True + from visual import version as __visual_version__ + __extensions__['vpython'] = True except ImportError: - pass + pass try: - from PyKEP import __version__ as __pykep_version__ - __extensions__['pykep']=True + from PyKEP import __version__ as __pykep_version__ + __extensions__['pykep'] = True except ImportError: - pass + pass -def run_test(n_trials=200, pop_size = 20, n_gen = 500): - """ - This function runs some tests on the algorthm. Use it to verify the correct installation - of PyGMO. +def run_test(n_trials=200, pop_size=20, n_gen=500): + """ + This function runs some tests on the algorithms. Use it to verify the correct installation + of PyGMO. - USAGE: PyGMO.run_test(n_trials=200, pop_size = 20, n_gen = 500) + USAGE: PyGMO.run_test(n_trials=200, pop_size = 20, n_gen = 500) - * n_trials: each algorithm will be called n_trials times on the same problem to then evaluate best, mean and std - * pop_size: this determines the population size - * n_gen: this regulates the maximim number of function evaluation + * n_trials: each algorithm will be called n_trials times on the same problem to then evaluate best, mean and std + * pop_size: this determines the population size + * n_gen: this regulates the maximum number of function evaluations - """ - from PyGMO import problem, algorithm, island - from numpy import mean, std - number_of_trials = n_trials - number_of_individuals = pop_size - number_of_generations = n_gen + """ + from PyGMO import problem, algorithm, island + from numpy import mean, std + number_of_trials = n_trials + number_of_individuals = pop_size + number_of_generations = n_gen - prob_list = [problem.schwefel(dim = 10), problem.rastrigin(dim = 10), problem.rosenbrock(dim = 10), problem.ackley(dim = 10), problem.griewank(dim = 10), problem.levy5(10)] - if __extensions__['gtop']: - prob_list.append(problem.cassini_1()) - prob_list.append(problem.gtoc_1()) - prob_list.append(problem.cassini_2()) - prob_list.append(problem.messenger_full()) - - algo_list = [algorithm.pso(gen = number_of_generations), algorithm.mde_pbx(gen = number_of_generations, xtol=1e-30, ftol=1e-30), algorithm.de(gen = number_of_generations,xtol=1e-30, ftol=1e-30), algorithm.jde(gen = number_of_generations, memory=False,xtol=1e-30, ftol=1e-30), algorithm.de_1220(gen = number_of_generations, memory=False,xtol=1e-30, ftol=1e-30), algorithm.sa_corana(iter = number_of_generations*number_of_individuals,Ts = 1,Tf = 0.01), algorithm.ihs(iter = number_of_generations*number_of_individuals), algorithm.sga(gen = number_of_generations), algorithm.cmaes(gen = number_of_generations,xtol=1e-30, ftol=1e-30, memory=False), algorithm.bee_colony(gen = number_of_generations/2)] - print('\nTrials: ' + str(n_trials) + ' - Population size: ' + str(pop_size) + ' - Generations: ' + str(n_gen)) - for prob in prob_list: - print('\nTesting problem: ' + prob.get_name() + ', Dimension: ' + str(prob.dimension) ) - print('With Population Size: ' + str(pop_size) ) - for algo in algo_list: - print(' ' + str(algo)) - best = [] - best_x = [] - for i in range(0,number_of_trials): - isl = island(algo,prob,number_of_individuals) - isl.evolve(1) - isl.join() - best.append(isl.population.champion.f) - best_x.append(isl.population.champion.x) - print(' Best:\t' + str(min(best)[0])) - print(' Mean:\t' + str(mean(best))) - print(' Std:\t' + str(std(best))) -if __extensions__['scipy']: - class race2algos: - """ - This class uses the concept of racing to compare two algorithms - on a probem. It runs repeatedly both algorithms on equal - starting populations up to when it finds a statistical difference between - the obtained samples. The difference is detected using Wilcoxon - ranksum test. The algorithms are tested on populations of equal size.
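The stopping rule that race2algos relies on reduces to a paired Wilcoxon test on the two samples of champion fitnesses. A minimal self-contained sketch of that rule (assuming SciPy and NumPy are installed; the synthetic samples below merely stand in for champion objective values, and note that scipy.stats.wilcoxon implements the paired signed-rank test, which fits here because both algorithms evolve copies of the same initial populations):

from scipy.stats import wilcoxon
import numpy as np

res1 = list(np.random.normal(0.0, 1.0, 30))  # champion fitnesses of algorithm 1
res2 = list(np.random.normal(0.5, 1.0, 30))  # champion fitnesses of algorithm 2
z, p = wilcoxon(res1, res2)  # the same call race2algos.are_different() makes
print("statistically different" if p < 0.05 else "keep racing")  # 0.05 = default p_req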
+ prob_list = [ + problem.schwefel( + dim=10), problem.rastrigin( + dim=10), problem.rosenbrock( + dim=10), problem.ackley( + dim=10), problem.griewank( + dim=10), problem.levy5(10)] + if __extensions__['gtop']: + prob_list.append(problem.cassini_1()) + prob_list.append(problem.gtoc_1()) + prob_list.append(problem.cassini_2()) + prob_list.append(problem.messenger_full()) + + algo_list = [ + algorithm.pso( + gen=number_of_generations), + algorithm.mde_pbx( + gen=number_of_generations, + xtol=1e-30, + ftol=1e-30), + algorithm.de( + gen=number_of_generations, + xtol=1e-30, + ftol=1e-30), + algorithm.jde( + gen=number_of_generations, + memory=False, + xtol=1e-30, + ftol=1e-30), + algorithm.de_1220( + gen=number_of_generations, + memory=False, + xtol=1e-30, + ftol=1e-30), + algorithm.sa_corana( + iter=number_of_generations * + number_of_individuals, + Ts=1, + Tf=0.01), + algorithm.ihs( + iter=number_of_generations * + number_of_individuals), + algorithm.sga( + gen=number_of_generations), + algorithm.cmaes( + gen=number_of_generations, + xtol=1e-30, + ftol=1e-30, + memory=False), + algorithm.bee_colony( + gen=number_of_generations / + 2)] + print('\nTrials: ' + str(n_trials) + ' - Population size: ' + + str(pop_size) + ' - Generations: ' + str(n_gen)) + for prob in prob_list: + print('\nTesting problem: ' + prob.get_name() + + ', Dimension: ' + str(prob.dimension)) + print('With Population Size: ' + str(pop_size)) + for algo in algo_list: + print(' ' + str(algo)) + best = [] + best_x = [] + for i in range(0, number_of_trials): + isl = island(algo, prob, number_of_individuals) + isl.evolve(1) + isl.join() + best.append(isl.population.champion.f) + best_x.append(isl.population.champion.x) + print(' Best:\t' + str(min(best)[0])) + print(' Mean:\t' + str(mean(best))) + print(' Std:\t' + str(std(best))) + +if __extensions__['scipy']: + class race2algos: + + """ + This class uses the concept of racing to compare two algorithms + on a problem. It repeatedly runs both algorithms on equal + starting populations until it finds a statistical difference between + the obtained samples. The difference is detected using the Wilcoxon + signed-rank test. The algorithms are tested on populations of equal size. + + """ + + def __init__( + self, + algo1, + algo2, + prob, + pop_size=20, + min_trials=20, + p=0.05, + max_runs=200): + """ + Upon construction of the class object the race is initialized and launched. + + USAGE: r = PyGMO.race2algos(algo1, algo2, prob, pop_size=20, min_trials=20, p = 0.05, max_runs=200) + + * algo1: first algorithm in the race + * algo2: second algorithm in the race + * prob: problem (i.e. the "track" the algos are racing upon) + * pop_size: population size of the island where the algos will perform evolution + * min_trials: minimum number of runs to compare the algorithms + * p: confidence level + * max_runs: maximum number of races .... + """ + from random import randint + from copy import deepcopy + from sys import stdout + self.algo1 = algo1 + self.algo2 = algo2 + self.prob = prob + self.res1 = [] + self.res2 = [] + self.pop_size = pop_size + self.p = 0 + self.z = 0 + self.p_req = p + print("Racing the algorithms ...") + + for i in range(max_runs): + stdout.write("\rRuns: %i" % i) + stdout.flush() + # We reset the random number generators of the algorithm + algo1.reset_rngs(randint(0, 9999999)) + algo2.reset_rngs(randint(0, 9999999)) + # We create an island with 20 individuals. 
This also initalizes + # its population at random within the box bounds + isl1 = island(algo1, prob, self.pop_size) + # We copy the island and change its algo. Like this we make sure the two algorithms + # will evolve the same inital population (good practice) + isl2 = deepcopy(isl1) + isl2.algorithm = algo2 + # We start the evolution (in parallel as we do not call the + # method join()) + isl1.evolve(1) + isl2.evolve(1) + # Here join is called implicitly as we try to access one of the + # islands during evolution + self.res1.append(isl1.population.champion.f[0]) + self.res2.append(isl2.population.champion.f[0]) + # We check that the race is over (only after having accumulated + # at least min_trials samples) + if (i > min_trials): + if (self.are_different(self.res1, self.res2)): + break + + def are_different(self, data1, data2): + from scipy.stats import wilcoxon + self.z, self.p = wilcoxon(data1, data2) + return (self.p < self.p_req) + + def plot(self): + """ + Plots the result of the race + + USAGE: r.plot() + """ + import matplotlib.pyplot as pl + pl.subplot(1, 2, 1) + pl.plot(sorted(self.res1), label="1." + self.algo1.get_name()) + pl.plot(sorted(self.res2), label="2." + self.algo2.get_name()) + pl.title( + self.prob.get_name() + " dim: " + str(self.prob.dimension)) + pl.xlabel('rank') + pl.legend() + + pl.subplot(1, 2, 2) + pl.boxplot([self.res1, self.res2]) + pl.ylabel('Obj.Fun.') + pl.title("Wilcoxon Test, p: %2.2e" % + self.p + ", z: " + str(self.z)) + pl.show() - """ - def __init__(self, algo1, algo2, prob, pop_size=20, min_trials=20, p = 0.05, max_runs=200): - """ - Upon construction of the class object the race is initialized and launched. - - USAGE: r = PyGMO.race2algos(algo1, algo2, prob, pop_size=20, min_trials=20, p = 0.05, max_runs=200): - - * algo1: first algorithm in the race - * algo2: second algorithm in the race - * prob: problem (i.e. the "track" the algos are racing upon) - * pop_size: population size of the island where the algos will perform evolution - * min_trials: minimum number of runs to compare the algorithms - * p: confidence level - * max_runs: maximum number of races .... - """ - from random import randint - from copy import deepcopy - from sys import stdout - self.algo1=algo1 - self.algo2=algo2 - self.prob=prob - self.res1 = [] - self.res2 = [] - self.pop_size = pop_size - self.p = 0 - self.z = 0 - self.p_req = p - print "Racing the algorithms ..." - - for i in range(max_runs): - stdout.write("\rRuns: %i" % i); stdout.flush() - #We reset the random number generators of the algorithm - algo1.reset_rngs(randint(0,9999999)); algo2.reset_rngs(randint(0,9999999)) - #We create an island with 20 individuals. This also initalizes its population at random within the box bounds - isl1 = island(algo1,prob,self.pop_size) - #We copy the island and change its algo. 
Like this we make sure the two algorithms - #will evolve the same inital population (good practice) - isl2 = deepcopy(isl1) - isl2.algorithm = algo2 - #We start the evolution (in parallel as we do not call the method join()) - isl1.evolve(1); isl2.evolve(1) - #Here join is called implicitly as we try to access one of the islands during evolution - self.res1.append(isl1.population.champion.f[0]) - self.res2.append(isl2.population.champion.f[0]) - #We check that the race is over (only after having accumulated at least min_trials samples) - if (i>min_trials): - if (self.are_different(self.res1,self.res2)): - break - - def are_different(self, data1,data2): - from scipy.stats import wilcoxon - self.z,self.p = wilcoxon(data1,data2) - return (self.p < self.p_req) - - def plot(self): - """ - Plots the result of the race - - USAGE: r.plot() - """ - import matplotlib.pyplot as pl - pl.subplot(1,2,1) - pl.plot(sorted(self.res1),label = "1." + self.algo1.get_name()) - pl.plot(sorted(self.res2), label = "2." + self.algo2.get_name()) - pl.title(self.prob.get_name() + " dim: " + str(self.prob.dimension)) - pl.xlabel('rank') - pl.legend() - - pl.subplot(1,2,2) - pl.boxplot([self.res1,self.res2]) - pl.ylabel('Obj.Fun.') - pl.title("Wilcoxon Test, p: %2.2e" % self.p + ", z: " + str(self.z)) - pl.show() def example_1(n_trials=25, variant_adptv=1, memory=True): - from PyGMO import problem, algorithm, island, archipelago - from PyGMO.topology import fully_connected - from numpy import mean, median - results = list() - prob = problem.messenger_full() - de_variants = [11,13,15,17] - algos = [algorithm.jde(gen=50,variant=v, memory=memory, variant_adptv=variant_adptv) for v in de_variants] - - for trial in range(n_trials): - archi = archipelago(topology=fully_connected()) - for algo in algos: - archi.push_back(island(algo,prob,25)) - print "Trial N: " + str(trial) - archi.evolve(30) - results.append(min([isl.population.champion.f[0] for isl in archi])) - return (mean(results), median(results), min(results), max(results)) - -def example_2(algo=algorithm.de(1), prob = problem.rosenbrock(10), topo = topology.barabasi_albert(3,3), n_evolve = 100, n_isl = 1024, pop_size = 20, color_code='rank'): - from PyGMO import problem, algorithm, island, archipelago - from matplotlib.pyplot import savefig, close - archi = archipelago(algo,prob,n_isl,pop_size,topology=topo) - print "Drawing Initial Condition .. " - pos = archi.draw(scale_by_degree=True,n_size=3,e_alpha=0.03, n_color = color_code) - savefig('archi000', dpi = 72) - close() - for i in range(1,n_evolve): - archi.evolve(1); - archi.join(); - print "Drawing"+ str(i) + "-th evolution .. 
" - pos = archi.draw(layout = pos, scale_by_degree=True,n_size=3,e_alpha=0.03, n_color = color_code) - savefig('archi%03d' % i, dpi = 72); - close() - + from PyGMO import problem, algorithm, island, archipelago + from PyGMO.topology import fully_connected + from numpy import mean, median + results = list() + prob = problem.messenger_full() + de_variants = [11, 13, 15, 17] + algos = [ + algorithm.jde( + gen=50, + variant=v, + memory=memory, + variant_adptv=variant_adptv) for v in de_variants] + + for trial in range(n_trials): + archi = archipelago(topology=fully_connected()) + for algo in algos: + archi.push_back(island(algo, prob, 25)) + print("Trial N: " + str(trial)) + archi.evolve(30) + results.append(min([isl.population.champion.f[0] for isl in archi])) + return (mean(results), median(results), min(results), max(results)) -#def test_aco(): +def example_2( + algo=algorithm.de(1), + prob=problem.rosenbrock(10), + topo=topology.barabasi_albert( + 3, + 3), + n_evolve=100, + n_isl=1024, + pop_size=20, + color_code='rank'): + from PyGMO import problem, algorithm, island, archipelago + from matplotlib.pyplot import savefig, close + archi = archipelago(algo, prob, n_isl, pop_size, topology=topo) + print("Drawing Initial Condition .. ") + pos = archi.draw( + scale_by_degree=True, n_size=3, e_alpha=0.03, n_color=color_code) + savefig('archi000', dpi=72) + close() + for i in range(1, n_evolve): + archi.evolve(1) + archi.join() + print("Drawing" + str(i) + "-th evolution .. ") + pos = archi.draw( + layout=pos, + scale_by_degree=True, + n_size=3, + e_alpha=0.03, + n_color=color_code) + savefig('archi%03d' % i, dpi=72) + close() + + + +# def test_aco(): # from PyGMO import problem, algorithm, island # from numpy import mean, std # number_of_islands = 5 @@ -254,4 +352,3 @@ def example_2(algo=algorithm.de(1), prob = problem.rosenbrock(10), topo = topolo # best_x.append(isl.population.champion.x) # print(' Best fitness:\t' + str(best[i])) # print(' Best solution:\t' + str(best_x[i])) - diff --git a/PyGMO/algorithm/__init__.py b/PyGMO/algorithm/__init__.py index 9a159190..4a50d350 100644 --- a/PyGMO/algorithm/__init__.py +++ b/PyGMO/algorithm/__init__.py @@ -1,325 +1,411 @@ # -*- coding: utf-8 -*- -from _algorithm import * -from _algorithm import _base -from _base import base -from _example import py_example -from _cmaes import py_cmaes -from _scipy_algos import * +from PyGMO.algorithm._algorithm import * +from PyGMO.algorithm._algorithm import _base +from PyGMO.algorithm._base import base +from PyGMO.algorithm._example import py_example +from PyGMO.algorithm._cmaes import py_cmaes +from PyGMO.algorithm._scipy_algos import * _base = _algorithm._base -#Creating the list of algorithms +# Creating the list of algorithms + + def _get_algorithm_list(): - import _algorithm as algorithm - # Try importing SciPy and NumPy. 
- try: - import scipy, numpy - algorithm_list = [algorithm.__dict__[n] for n in filter(lambda n: not n.startswith('_') and not n == 'base',dir(algorithm))] - except ImportError as e: - algorithm_list = [algorithm.__dict__[n] for n in filter(lambda n: not n.startswith('_') and not n == 'base' and not n.startswith('scipy'),dir(algorithm))] - return algorithm_list - - - -# Redefining the constructors of all algorithms to obtain good documentation and to allow kwargs -def _de_ctor(self, gen=100, f=0.8, cr=0.9, variant=2, ftol=1e-6, xtol=1e-6, screen_output = False): - """ - Constructs a Differential Evolution algorithm: - - USAGE: algorithm.de(gen=1, f=0.5, cr=0.9, variant=2, ftol=1e-6, xtol=1e-6, screen_output = False) - - * gen: number of generations - * f: weighting factor in [0,1] (if -1 self-adptation is used) - * cr: crossover in [0,1] (if -1 self-adptation is used) - * variant: algoritmic variant to use (one of [1 .. 10]) - 1. DE/best/1/exp - 2. DE/rand/1/exp - 3. DE/rand-to-best/1/exp - 4. DE/best/2/exp - 5. DE/rand/2/exp - 6. DE/best/1/bin - 7. DE/rand/1/bin - 8. DE/rand-to-best/1/bin - 9. DE/best/2/bin - 10. DE/rand/2/bin - * ftol stop criteria on f - * xtol stop criteria on x - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(f) - arg_list.append(cr) - arg_list.append(variant) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - self.screen_output = screen_output + from PyGMO.algorithm import _algorithm as algorithm + # Try importing SciPy and NumPy. + try: + import scipy + import numpy + algorithm_list = [ + algorithm.__dict__[n] + for n in + [n + for n in dir(algorithm) + if not n.startswith('_') and not n == 'base']] + except ImportError as e: + algorithm_list = [ + algorithm.__dict__[n] + for n in + [n + for n in dir(algorithm) + if not n.startswith('_') and not n == 'base' and not n. + startswith('scipy')]] + return algorithm_list + + +# Redefining the constructors of all algorithms to obtain good +# documentation and to allow kwargs +def _de_ctor( + self, + gen=100, + f=0.8, + cr=0.9, + variant=2, + ftol=1e-6, + xtol=1e-6, + screen_output=False): + """ + Constructs a Differential Evolution algorithm: + + USAGE: algorithm.de(gen=1, f=0.5, cr=0.9, variant=2, ftol=1e-6, xtol=1e-6, screen_output = False) + + * gen: number of generations + * f: weighting factor in [0,1] (if -1 self-adptation is used) + * cr: crossover in [0,1] (if -1 self-adptation is used) + * variant: algoritmic variant to use (one of [1 .. 10]) + 1. DE/best/1/exp + 2. DE/rand/1/exp + 3. DE/rand-to-best/1/exp + 4. DE/best/2/exp + 5. DE/rand/2/exp + 6. DE/best/1/bin + 7. DE/rand/1/bin + 8. DE/rand-to-best/1/bin + 9. DE/best/2/bin + 10. DE/rand/2/bin + * ftol stop criteria on f + * xtol stop criteria on x + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(f) + arg_list.append(cr) + arg_list.append(variant) + arg_list.append(ftol) + arg_list.append(xtol) + self._orig_init(*arg_list) + self.screen_output = screen_output de._orig_init = de.__init__ de.__init__ = _de_ctor - -def _jde_ctor(self, gen=100, variant=2, variant_adptv=1, ftol=1e-6, xtol=1e-6, memory=False, screen_output = False): - """ - Constructs a jDE algorithm (self-adaptive DE) - - REF: "Self-adaptive differential evolution algorithm in constrained real-parameter optimization" - J Brest, V Zumer, MS Maucec - Evolutionary Computation, 2006. 
- http://dsp.szu.edu.cn/DSP2006/research/publication/yan/WebEdit/UploadFile/Self-adaptive%20Differential%20Evolution%20Algorithm%20for%20Constrained%20Real-Parameter%20Optimization.pdf - - USAGE: algorithm.jde(gen=100, variant=2, variant_adptv=1, ftol=1e-6, xtol=1e-6, memory = False, screen_output = False) - - * gen: number of generations - * variant: algoritmic variant to use (one of [1 .. 18]) - 1. best/1/exp 2. rand/1/exp - 3. rand-to-best/1/exp 4. best/2/exp - 5. rand/2/exp 6. best/1/bin - 7. rand/1/bin 8. rand-to-best/1/bin - 9. best/2/bin 10. rand/2/bin - 11. best/3/exp 12. best/3/bin - 13. rand/3/exp 14. rand/3/bin - 15. rand-to-current/2/exp 16. rand-to-current/2/bin - 17. rand-to-best-and-current/2/exp 18. rand-to-best-and-current/2/bin - * variant_adptv: adaptive scheme to use (one of [1..2]) - 1. random param mutation 2. param mutation follows rand/3 scheme - * ftol: stop criteria on f - * xtol: stop criteria on x - * memory: if True the algorithm internal state is saved and used for the next call - * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with - * different island outputs) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(variant) - arg_list.append(variant_adptv) - arg_list.append(ftol) - arg_list.append(xtol) - arg_list.append(memory) - self._orig_init(*arg_list) - self.screen_output = screen_output +def _jde_ctor( + self, + gen=100, + variant=2, + variant_adptv=1, + ftol=1e-6, + xtol=1e-6, + memory=False, + screen_output=False): + """ + Constructs a jDE algorithm (self-adaptive DE) + + REF: "Self-adaptive differential evolution algorithm in constrained real-parameter optimization" + J Brest, V Zumer, MS Maucec - Evolutionary Computation, 2006. + http://dsp.szu.edu.cn/DSP2006/research/publication/yan/WebEdit/UploadFile/Self-adaptive%20Differential%20Evolution%20Algorithm%20for%20Constrained%20Real-Parameter%20Optimization.pdf + + USAGE: algorithm.jde(gen=100, variant=2, variant_adptv=1, ftol=1e-6, xtol=1e-6, memory = False, screen_output = False) + + * gen: number of generations + * variant: algoritmic variant to use (one of [1 .. 18]) + 1. best/1/exp 2. rand/1/exp + 3. rand-to-best/1/exp 4. best/2/exp + 5. rand/2/exp 6. best/1/bin + 7. rand/1/bin 8. rand-to-best/1/bin + 9. best/2/bin 10. rand/2/bin + 11. best/3/exp 12. best/3/bin + 13. rand/3/exp 14. rand/3/bin + 15. rand-to-current/2/exp 16. rand-to-current/2/bin + 17. rand-to-best-and-current/2/exp 18. rand-to-best-and-current/2/bin + * variant_adptv: adaptive scheme to use (one of [1..2]) + 1. random param mutation 2. 
param mutation follows rand/3 scheme + * ftol: stop criteria on f + * xtol: stop criteria on x + * memory: if True the algorithm internal state is saved and used for the next call + * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with + * different island outputs) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(variant) + arg_list.append(variant_adptv) + arg_list.append(ftol) + arg_list.append(xtol) + arg_list.append(memory) + self._orig_init(*arg_list) + self.screen_output = screen_output jde._orig_init = jde.__init__ jde.__init__ = _jde_ctor -def _de_1220_ctor(self, gen=100, variant_adptv=1, allowed_variants = [1,2,3,4,5,6,7,8,9,10], memory = False, ftol=1e-6, xtol=1e-6, screen_output = False): - """ - Constructs a Differential Evolution algorithm (our own brew). Self adaptation on F, CR and mutation variant.: - - USAGE: algorithm.de_1220(gen=100, variant_adptv=1, allowed_variants = [i for i in range(1,19)], memory = False, ftol=1e-6, xtol=1e-6, screen_output = False) - - * gen: number of generations - * variant_adptv: adaptiv scheme to use (one of [1..2]) - 1. random param mutation 2. param mutation follows relative DE scheme - * allowed_variants : a list of the algoritmic variants to mix and self-adapt. Allowed variants are ... - 1. best/1/exp 2. rand/1/exp - 3. rand-to-best/1/exp 4. best/2/exp - 5. rand/2/exp 6. best/1/bin - 7. rand/1/bin 8. rand-to-best/1/bin - 9. best/2/bin 10. rand/2/bin - 11. best/3/exp 12. best/3/bin - 13. rand/3/exp 14. rand/3/bin - 15. rand-to-current/2/exp 16. rand-to-current/2/bin - 17. rand-to-best-and-current/2/exp 18. rand-to-best-and-current/2/bin - * ftol: stop criteria on f - * xtol: stop criteria on x - * memory: if True the algorithm internal state is saved and used for the next call - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(variant_adptv) - arg_list.append(allowed_variants) - arg_list.append(memory) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - self.screen_output = screen_output + +def _de_1220_ctor( + self, + gen=100, + variant_adptv=1, + allowed_variants=[ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10], + memory=False, + ftol=1e-6, + xtol=1e-6, + screen_output=False): + """ + Constructs a Differential Evolution algorithm (our own brew). Self adaptation on F, CR and mutation variant.: + + USAGE: algorithm.de_1220(gen=100, variant_adptv=1, allowed_variants = [i for i in range(1,19)], memory = False, ftol=1e-6, xtol=1e-6, screen_output = False) + + * gen: number of generations + * variant_adptv: adaptiv scheme to use (one of [1..2]) + 1. random param mutation 2. param mutation follows relative DE scheme + * allowed_variants : a list of the algoritmic variants to mix and self-adapt. Allowed variants are ... + 1. best/1/exp 2. rand/1/exp + 3. rand-to-best/1/exp 4. best/2/exp + 5. rand/2/exp 6. best/1/bin + 7. rand/1/bin 8. rand-to-best/1/bin + 9. best/2/bin 10. rand/2/bin + 11. best/3/exp 12. best/3/bin + 13. rand/3/exp 14. rand/3/bin + 15. rand-to-current/2/exp 16. rand-to-current/2/bin + 17. rand-to-best-and-current/2/exp 18. 
rand-to-best-and-current/2/bin + * ftol: stop criteria on f + * xtol: stop criteria on x + * memory: if True the algorithm internal state is saved and used for the next call + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(variant_adptv) + arg_list.append(allowed_variants) + arg_list.append(memory) + arg_list.append(ftol) + arg_list.append(xtol) + self._orig_init(*arg_list) + self.screen_output = screen_output de_1220._orig_init = de_1220.__init__ de_1220.__init__ = _de_1220_ctor -def _mde_pbx_ctor(self, gen=100, qperc=0.15, nexp=1.5, ftol=1e-6, xtol=1e-6, screen_output = False): - """ - Constructs a mde_pbx algorithm (self-adaptive DE) - - REF: "An Adaptive Differential Evolution Algorithm With Novel Mutation and Crossover - Strategies for Global Numerical Optimization" - IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS?PART B: CYBERNETICS, VOL. 42, NO. 2, APRIL 20 - - - USAGE: algorithm.mde_pbx(gen=100, qperc=0.15, nexp=1.5, ftol=1e-6, xtol=1e-6, screen_output = False) - - * gen: number of generations - * qperc: percentage of population to choose the best vector - * nexp: exponent for the powermean - * ftol: stop criteria on f - * xtol: stop criteria on x - * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with - * different island outputs) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(qperc) - arg_list.append(nexp) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - self.screen_output = screen_output + +def _mde_pbx_ctor( + self, + gen=100, + qperc=0.15, + nexp=1.5, + ftol=1e-6, + xtol=1e-6, + screen_output=False): + """ + Constructs a mde_pbx algorithm (self-adaptive DE) + + REF: "An Adaptive Differential Evolution Algorithm With Novel Mutation and Crossover + Strategies for Global Numerical Optimization" - IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS?PART B: CYBERNETICS, VOL. 42, NO. 2, APRIL 20 + + + USAGE: algorithm.mde_pbx(gen=100, qperc=0.15, nexp=1.5, ftol=1e-6, xtol=1e-6, screen_output = False) + + * gen: number of generations + * qperc: percentage of population to choose the best vector + * nexp: exponent for the powermean + * ftol: stop criteria on f + * xtol: stop criteria on x + * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with + * different island outputs) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(qperc) + arg_list.append(nexp) + arg_list.append(ftol) + arg_list.append(xtol) + self._orig_init(*arg_list) + self.screen_output = screen_output mde_pbx._orig_init = mde_pbx.__init__ mde_pbx.__init__ = _mde_pbx_ctor -def _pso_ctor(self, gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4): - """ - Constructs a Particle Swarm Optimization (steady-state). The position update is applied - immediately after the velocity update - - REF (for variants 5-6): http://cswww.essex.ac.uk/staff/rpoli/papers/PoliKennedyBlackwellSI2007.pdf - - REF (for variants 1-4): Kennedy, J.; Eberhart, R. (1995). "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942?1948. 
- - USAGE: algorithm.pso(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4) - - * gen: number of generations - * omega: constriction factor (or particle inertia weight) in [0,1] - * eta1: Cognitive component in [0,4] - * eta2: Social component in [0,4] - * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1] - * variant: algoritmic variant to use (one of [1 .. 6]) - 1. PSO canonical (with inertia weight) - 2. PSO canonical (with inertia weight - and equal random weights of social and cognitive components) - 3. PSO variant (with inertia weight - same random number for all components.) - 4. PSO variant (with inertia weight - same random number for all components - and equal weights of social and cognitive components) - 5. PSO canonical (with constriction factor) - 6. Fully Informed Particle Swarm (FIPS) - * neighb_type: defines the particle neighbourhood (used for the social component) - 1. gbest neighbourhood topology (fully connected) - 2. lbest neighbourhood topology (ring) - 3. Von-Neumann neighbourhood topology (square lattice) - 4. Randomly-varying neighbourhood topology - * neighb_param: if the lbest topology is selected, it represents each particle's indegree - (also outdegree) in the swarm topology. Particles have neighbours up - to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology - is selected, neighb_param represents each particle's maximum outdegree in the swarm topology. - The minimum outdegree is 1 (the particle always connects back to itself). - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(omega) - arg_list.append(eta1) - arg_list.append(eta2) - arg_list.append(vcoeff) - arg_list.append(variant) - arg_list.append(neighb_type) - arg_list.append(neighb_param) - self._orig_init(*arg_list) +def _pso_ctor( + self, + gen=1, + omega=0.7298, + eta1=2.05, + eta2=2.05, + vcoeff=0.5, + variant=5, + neighb_type=2, + neighb_param=4): + """ + Constructs a Particle Swarm Optimization (steady-state). The position update is applied + immediately after the velocity update + + REF (for variants 5-6): http://cswww.essex.ac.uk/staff/rpoli/papers/PoliKennedyBlackwellSI2007.pdf + + REF (for variants 1-4): Kennedy, J.; Eberhart, R. (1995). "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942?1948. + + USAGE: algorithm.pso(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4) + + * gen: number of generations + * omega: constriction factor (or particle inertia weight) in [0,1] + * eta1: Cognitive component in [0,4] + * eta2: Social component in [0,4] + * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1] + * variant: algoritmic variant to use (one of [1 .. 6]) + 1. PSO canonical (with inertia weight) + 2. PSO canonical (with inertia weight + and equal random weights of social and cognitive components) + 3. PSO variant (with inertia weight + same random number for all components.) + 4. PSO variant (with inertia weight + same random number for all components + and equal weights of social and cognitive components) + 5. PSO canonical (with constriction factor) + 6. Fully Informed Particle Swarm (FIPS) + * neighb_type: defines the particle neighbourhood (used for the social component) + 1. gbest neighbourhood topology (fully connected) + 2. lbest neighbourhood topology (ring) + 3. 
Von-Neumann neighbourhood topology (square lattice) + 4. Randomly-varying neighbourhood topology + * neighb_param: if the lbest topology is selected, it represents each particle's indegree + (also outdegree) in the swarm topology. Particles have neighbours up + to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology + is selected, neighb_param represents each particle's maximum outdegree in the swarm topology. + The minimum outdegree is 1 (the particle always connects back to itself). + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(omega) + arg_list.append(eta1) + arg_list.append(eta2) + arg_list.append(vcoeff) + arg_list.append(variant) + arg_list.append(neighb_type) + arg_list.append(neighb_param) + self._orig_init(*arg_list) pso._orig_init = pso.__init__ pso.__init__ = _pso_ctor -def _pso_gen_ctor(self, gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4): - """ - Constructs a Particle Swarm Optimization (generational). The position update is applied - only at the end of an entire loop over the population (swarm). Use this version for stochastic problems. - - USAGE: algorithm.pso_gen(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4) - - * gen: number of generations - * omega: constriction factor (or particle inertia weight) in [0,1] - * eta1: Cognitive component in [0,4] - * eta2: Social component in [0,4] - * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1] - * variant: algoritmic variant to use (one of [1 .. 6]) - 1. PSO canonical (with inertia weight) - 2. PSO canonical (with inertia weight - and equal random weights of social and cognitive components) - 3. PSO variant (with inertia weight - same random number for all components.) - 4. PSO variant (with inertia weight - same random number for all components - and equal weights of social and cognitive components) - 5. PSO canonical (with constriction factor) - 6. Fully Informed Particle Swarm (FIPS) - * neighb_type: defines the particle neighbourhood (used for the social component) - 1. gbest neighbourhood topology (fully connected) - 2. lbest neighbourhood topology (ring) - 3. Von-Neumann neighbourhood topology (square lattice) - 4. Randomly-varying neighbourhood topology - * neighb_param: if the lbest topology is selected, it represents each particle's indegree - (also outdegree) in the swarm topology. Particles have neighbours up - to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology - is selected, neighb_param represents each particle's maximum outdegree in the swarm topology. - The minimum outdegree is 1 (the particle always connects back to itself). - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(omega) - arg_list.append(eta1) - arg_list.append(eta2) - arg_list.append(vcoeff) - arg_list.append(variant) - arg_list.append(neighb_type) - arg_list.append(neighb_param) - self._orig_init(*arg_list) + +def _pso_gen_ctor( + self, + gen=1, + omega=0.7298, + eta1=2.05, + eta2=2.05, + vcoeff=0.5, + variant=5, + neighb_type=2, + neighb_param=4): + """ + Constructs a Particle Swarm Optimization (generational). The position update is applied + only at the end of an entire loop over the population (swarm). Use this version for stochastic problems. 
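+
+    For example, a minimal sketch (assuming a compiled PyGMO, with island and problem imported from PyGMO):
+
+        algo = algorithm.pso_gen(gen=100)
+        isl = island(algo, problem.ackley(dim=10), 20)
+        isl.evolve(1)
+        isl.join()
+        print(isl.population.champion.f)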
-def _pso_gen_ctor(self, gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4):
-    """
-    Constructs a Particle Swarm Optimization (generational). The position update is applied
-    only at the end of an entire loop over the population (swarm). Use this version for stochastic problems.
-
-    USAGE: algorithm.pso_gen(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4)
-
-    * gen: number of generations
-    * omega: constriction factor (or particle inertia weight) in [0,1]
-    * eta1: Cognitive component in [0,4]
-    * eta2: Social component in [0,4]
-    * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1]
-    * variant: algoritmic variant to use (one of [1 .. 6])
-        1. PSO canonical (with inertia weight)
-        2. PSO canonical (with inertia weight
-           and equal random weights of social and cognitive components)
-        3. PSO variant (with inertia weight
-           same random number for all components.)
-        4. PSO variant (with inertia weight
-           same random number for all components
-           and equal weights of social and cognitive components)
-        5. PSO canonical (with constriction factor)
-        6. Fully Informed Particle Swarm (FIPS)
-    * neighb_type: defines the particle neighbourhood (used for the social component)
-        1. gbest neighbourhood topology (fully connected)
-        2. lbest neighbourhood topology (ring)
-        3. Von-Neumann neighbourhood topology (square lattice)
-        4. Randomly-varying neighbourhood topology
-    * neighb_param: if the lbest topology is selected, it represents each particle's indegree
-      (also outdegree) in the swarm topology. Particles have neighbours up
-      to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology
-      is selected, neighb_param represents each particle's maximum outdegree in the swarm topology.
-      The minimum outdegree is 1 (the particle always connects back to itself).
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(omega)
-    arg_list.append(eta1)
-    arg_list.append(eta2)
-    arg_list.append(vcoeff)
-    arg_list.append(variant)
-    arg_list.append(neighb_type)
-    arg_list.append(neighb_param)
-    self._orig_init(*arg_list)
+
+def _pso_gen_ctor(
+        self,
+        gen=1,
+        omega=0.7298,
+        eta1=2.05,
+        eta2=2.05,
+        vcoeff=0.5,
+        variant=5,
+        neighb_type=2,
+        neighb_param=4):
+    """
+    Constructs a Particle Swarm Optimization (generational). The position update is applied
+    only at the end of an entire loop over the population (swarm). Use this version for stochastic problems.
+
+    USAGE: algorithm.pso_gen(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4)
+
+    * gen: number of generations
+    * omega: constriction factor (or particle inertia weight) in [0,1]
+    * eta1: Cognitive component in [0,4]
+    * eta2: Social component in [0,4]
+    * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1]
+    * variant: algorithmic variant to use (one of [1 .. 6])
+        1. PSO canonical (with inertia weight)
+        2. PSO canonical (with inertia weight
+           and equal random weights of social and cognitive components)
+        3. PSO variant (with inertia weight
+           same random number for all components.)
+        4. PSO variant (with inertia weight
+           same random number for all components
+           and equal weights of social and cognitive components)
+        5. PSO canonical (with constriction factor)
+        6. Fully Informed Particle Swarm (FIPS)
+    * neighb_type: defines the particle neighbourhood (used for the social component)
+        1. gbest neighbourhood topology (fully connected)
+        2. lbest neighbourhood topology (ring)
+        3. Von-Neumann neighbourhood topology (square lattice)
+        4. Randomly-varying neighbourhood topology
+    * neighb_param: if the lbest topology is selected, it represents each particle's indegree
+      (also outdegree) in the swarm topology. Particles have neighbours up
+      to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology
+      is selected, neighb_param represents each particle's maximum outdegree in the swarm topology.
+      The minimum outdegree is 1 (the particle always connects back to itself).
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(omega)
+    arg_list.append(eta1)
+    arg_list.append(eta2)
+    arg_list.append(vcoeff)
+    arg_list.append(variant)
+    arg_list.append(neighb_type)
+    arg_list.append(neighb_param)
+    self._orig_init(*arg_list)
pso_gen._orig_init = pso_gen.__init__
pso_gen.__init__ = _pso_gen_ctor
-def _pso_gen_racing_ctor(self, gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4, nr_eval_per_x = 5, max_fevals = 10000000):
-    """
-    Constructs a Particle Swarm Optimization (generational). The position update is applied
-    only at the end of an entire loop over the population (swarm). Use this version for stochastic problems.
-
-    USAGE: algorithm.pso_gen(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4, nr_eval_per_x = 5, max_fevals = 10000000)
-
-    * gen: number of generations
-    * omega: constriction factor (or particle inertia weight) in [0,1]
-    * eta1: Cognitive component in [0,4]
-    * eta2: Social component in [0,4]
-    * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1]
-    * variant: algoritmic variant to use (one of [1 .. 6])
-        1. PSO canonical (with inertia weight)
-        2. PSO canonical (with inertia weight
-           and equal random weights of social and cognitive components)
-        3. PSO variant (with inertia weight
-           same random number for all components.)
-        4. PSO variant (with inertia weight
-           same random number for all components
-           and equal weights of social and cognitive components)
-        5. PSO canonical (with constriction factor)
-        6. Fully Informed Particle Swarm (FIPS)
-    * neighb_type: defines the particle neighbourhood (used for the social component)
-        1. gbest neighbourhood topology (fully connected)
-        2. lbest neighbourhood topology (ring)
-        3. Von-Neumann neighbourhood topology (square lattice)
-        4. Randomly-varying neighbourhood topology
-    * neighb_param: if the lbest topology is selected, it represents each particle's indegree
-      (also outdegree) in the swarm topology. Particles have neighbours up
-      to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology
-      is selected, neighb_param represents each particle's maximum outdegree in the swarm topology.
-      The minimum outdegree is 1 (the particle always connects back to itself).
-    * nr_eval_per_x: Specify the expected budget to be allocated during racing
-    * max_fevals: When specified other than -1, this serve as another termination condition -- maximium number of objective function evaluations
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(omega)
-    arg_list.append(eta1)
-    arg_list.append(eta2)
-    arg_list.append(vcoeff)
-    arg_list.append(variant)
-    arg_list.append(neighb_type)
-    arg_list.append(neighb_param)
-    arg_list.append(nr_eval_per_x)
-    if max_fevals > 0:
-        arg_list.append(max_fevals)
-    self._orig_init(*arg_list)
+def _pso_gen_racing_ctor(
+        self,
+        gen=1,
+        omega=0.7298,
+        eta1=2.05,
+        eta2=2.05,
+        vcoeff=0.5,
+        variant=5,
+        neighb_type=2,
+        neighb_param=4,
+        nr_eval_per_x=5,
+        max_fevals=10000000):
+    """
+    Constructs a Particle Swarm Optimization (generational, with racing). The position update is applied
+    only at the end of an entire loop over the population (swarm). Use this version for stochastic problems.
+
+    USAGE: algorithm.pso_gen_racing(gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05, vcoeff = 0.5, variant = 5, neighb_type = 2, neighb_param = 4, nr_eval_per_x = 5, max_fevals = 10000000)
+
+    * gen: number of generations
+    * omega: constriction factor (or particle inertia weight) in [0,1]
+    * eta1: Cognitive component in [0,4]
+    * eta2: Social component in [0,4]
+    * vcoeff: Maximum velocity coefficient (w.r.t. the box-bounds width) in [0,1]
+    * variant: algorithmic variant to use (one of [1 .. 6])
+        1. PSO canonical (with inertia weight)
+        2. PSO canonical (with inertia weight
+           and equal random weights of social and cognitive components)
+        3. PSO variant (with inertia weight
+           same random number for all components.)
+        4. PSO variant (with inertia weight
+           same random number for all components
+           and equal weights of social and cognitive components)
+        5. PSO canonical (with constriction factor)
+        6. Fully Informed Particle Swarm (FIPS)
+    * neighb_type: defines the particle neighbourhood (used for the social component)
+        1. gbest neighbourhood topology (fully connected)
+        2. lbest neighbourhood topology (ring)
+        3. Von-Neumann neighbourhood topology (square lattice)
+        4. Randomly-varying neighbourhood topology
+    * neighb_param: if the lbest topology is selected, it represents each particle's indegree
+      (also outdegree) in the swarm topology. Particles have neighbours up
+      to a radius of k = neighb_param / 2 in the ring. If the Randomly-varying neighbourhood topology
+      is selected, neighb_param represents each particle's maximum outdegree in the swarm topology.
+      The minimum outdegree is 1 (the particle always connects back to itself).
+    * nr_eval_per_x: Specify the expected budget to be allocated during racing
+    * max_fevals: When positive, this serves as another termination condition -- maximum number of objective function evaluations
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(omega)
+    arg_list.append(eta1)
+    arg_list.append(eta2)
+    arg_list.append(vcoeff)
+    arg_list.append(variant)
+    arg_list.append(neighb_type)
+    arg_list.append(neighb_param)
+    arg_list.append(nr_eval_per_x)
+    if max_fevals > 0:
+        arg_list.append(max_fevals)
+    self._orig_init(*arg_list)
pso_gen_racing._orig_init = pso_gen_racing.__init__
pso_gen_racing.__init__ = _pso_gen_racing_ctor
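# Usage sketch (illustrative, not part of this patch): racing re-evaluates each
# chromosome nr_eval_per_x times, so max_fevals bounds the total budget.
from PyGMO import algorithm
algo = algorithm.pso_gen_racing(gen=50, nr_eval_per_x=5, max_fevals=100000)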
@@ -327,64 +413,83 @@ def _pso_gen_racing_ctor(self, gen=1, omega = 0.7298, eta1 = 2.05, eta2 = 2.05,
_algorithm.sga.selection = _algorithm._sga_selection_type
_algorithm.sga.mutation = _algorithm._sga_mutation_type
-def _sga_ctor(self, gen=1, cr=.95, m=.02, elitism=1, mutation=sga.mutation.GAUSSIAN, width = 0.1, selection=sga.selection.ROULETTE, crossover=sga.crossover.EXPONENTIAL):
-    """
-    Constructs a Simple Genetic Algorithm (generational)
-
-    USAGE: algorithm.sga(self, gen=1, cr=.95, m=.02, elitism=1, mutation=sga.mutation.GAUSSIAN, width = 0.1, selection=sga.selection.ROULETTE, crossover=sga.crossover.EXPONENTIAL)
-
-    * gen: number of generations
-    * cr: crossover factor in [0,1]
-    * m: mutation probability (for each component) [0,1]
-    * elitism: number of generation after which the best is reinserted
-    * mutation: mutation type (one of [RANDOM, GAUSSIAN])
-    * width: the mutation width (in case of a GAUSSIAN bell
-      this is the std normalized with the width)
-    * selection: selection startegy (one of [ROULETTE, BEST20])
-    * crossover: crossover strategy (one of [BINOMIAL, EXPONENTIAL])
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(cr)
-    arg_list.append(m)
-    arg_list.append(elitism)
-    arg_list.append(mutation)
-    arg_list.append(width)
-    arg_list.append(selection)
-    arg_list.append(crossover)
-    self._orig_init(*arg_list)
+
+def _sga_ctor(
+        self,
+        gen=1,
+        cr=.95,
+        m=.02,
+        elitism=1,
+        mutation=sga.mutation.GAUSSIAN,
+        width=0.1,
+        selection=sga.selection.ROULETTE,
+        crossover=sga.crossover.EXPONENTIAL):
+    """
+    Constructs a Simple Genetic Algorithm (generational)
+
+    USAGE: algorithm.sga(self, gen=1, cr=.95, m=.02, elitism=1, mutation=sga.mutation.GAUSSIAN, width = 0.1, selection=sga.selection.ROULETTE, crossover=sga.crossover.EXPONENTIAL)
+
+    * gen: number of generations
+    * cr: crossover factor in [0,1]
+    * m: mutation probability (for each component) [0,1]
+    * elitism: number of generations after which the best is reinserted
+    * mutation: mutation type (one of [RANDOM, GAUSSIAN])
+    * width: the mutation width (in case of a GAUSSIAN bell
+      this is the std normalized with the width)
+    * selection: selection strategy (one of [ROULETTE, BEST20])
+    * crossover: crossover strategy (one of [BINOMIAL, EXPONENTIAL])
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(cr)
+    arg_list.append(m)
+    arg_list.append(elitism)
+    arg_list.append(mutation)
+    arg_list.append(width)
+    arg_list.append(selection)
+    arg_list.append(crossover)
+    self._orig_init(*arg_list)
sga._orig_init = sga.__init__
sga.__init__ = _sga_ctor
_algorithm.vega.crossover = _algorithm._vega_crossover_type
_algorithm.vega.mutation = _algorithm._vega_mutation_type
-def _vega_ctor(self, gen=1, cr=.95, m=.02, elitism=1, mutation=vega.mutation.GAUSSIAN, width = 0.1, crossover=vega.crossover.EXPONENTIAL):
-    """
-    Constructs a Vector evaluated genetic algorithm
-
-    USAGE: algorithm.vega(self, gen=1, cr=.95, m=.02, elitism=1, mutation=vega.mutation.GAUSSIAN, width = 0.1, crossover=vega.crossover.EXPONENTIAL)
-
-    * gen: number of generations
-    * cr: crossover factor in [0,1]
-    * m: mutation probability (for each component) [0,1]
-    * elitism: number of generation after which the best is reinserted
-    * mutation: mutation type (one of [RANDOM, GAUSSIAN])
-    * width: the mutation width (in case of a GAUSSIAN bell
-      this is the std normalized with the width)
-    * crossover: crossover strategy (one of [BINOMIAL, EXPONENTIAL])
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(cr)
-    arg_list.append(m)
-    arg_list.append(elitism)
-    arg_list.append(mutation)
-    arg_list.append(width)
-    arg_list.append(crossover)
-    self._orig_init(*arg_list)
+
+def _vega_ctor(
+        self,
+        gen=1,
+        cr=.95,
+        m=.02,
+        elitism=1,
+        mutation=vega.mutation.GAUSSIAN,
+        width=0.1,
+        crossover=vega.crossover.EXPONENTIAL):
+    """
+    Constructs a Vector evaluated genetic algorithm
+
+    USAGE: algorithm.vega(self, gen=1, cr=.95, m=.02, elitism=1, mutation=vega.mutation.GAUSSIAN, width = 0.1, crossover=vega.crossover.EXPONENTIAL)
+
+    * gen: number of generations
+    * cr: crossover factor in [0,1]
+    * m: mutation probability (for each component) [0,1]
+    * elitism: number of generations after which the best is reinserted
+    * mutation: mutation type (one of [RANDOM, GAUSSIAN])
+    * width: the mutation width (in case of a GAUSSIAN bell
+      this is the std normalized with the width)
+    * crossover: crossover strategy (one of [BINOMIAL, EXPONENTIAL])
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(cr)
+    arg_list.append(m)
+    arg_list.append(elitism)
+    arg_list.append(mutation)
+    arg_list.append(width)
+    arg_list.append(crossover)
+    self._orig_init(*arg_list)
vega._orig_init = vega.__init__
vega.__init__ = _vega_ctor
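# Usage sketch (illustrative, not part of this patch): the enums re-exported on
# the class above can be passed as keywords; values are arbitrary.
from PyGMO import algorithm
algo = algorithm.sga(gen=500, cr=0.9, m=0.05,
                     mutation=algorithm.sga.mutation.GAUSSIAN,
                     selection=algorithm.sga.selection.ROULETTE)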
@@ -392,266 +497,390 @@ def _vega_ctor(self, gen=1, cr=.95, m=.02, elitism=1, mutation=vega.mutation.GAU
_algorithm.sga_gray.selection = _algorithm._gray_selection_type
_algorithm.sga_gray.mutation = _algorithm._gray_mutation_type
-def _sga_gray_ctor(self, gen=1, cr=.95, m=.02, elitism=1, mutation=sga_gray.mutation.UNIFORM, selection=sga_gray.selection.ROULETTE, crossover=sga_gray.crossover.SINGLE_POINT):
-    """
-    Constructs a Simple Genetic Algorithm with gray binary encoding (generational)
-
-    USAGE: algorithm.sga_gray(self, gen=1, cr=.95, m=.02, elitism=1, mutation=sga.mutation.UNIFORM, selection=sga.selection.ROULETTE, crossover=sga.crossover.SINGLE_POINT)
-
-    * gen: Number of generations to evolve.
-    * cr: crossover factor in [0,1]
-    * m: mutation probability (of each encoded bit) [0,1]
-    * elitism: number of generation after which the best is reinserted
-    * mut: mutation type (one of [UNIFORM])
-    * sel: selection strategy (one of [ROULETTE, BEST20])
-    * cro: crossover strategy (one of [SINGLE_POINT])
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(cr)
-    arg_list.append(m)
-    arg_list.append(elitism)
-    arg_list.append(mutation)
-    arg_list.append(selection)
-    arg_list.append(crossover)
-    self._orig_init(*arg_list)
+
+def _sga_gray_ctor(
+        self,
+        gen=1,
+        cr=.95,
+        m=.02,
+        elitism=1,
+        mutation=sga_gray.mutation.UNIFORM,
+        selection=sga_gray.selection.ROULETTE,
+        crossover=sga_gray.crossover.SINGLE_POINT):
+    """
+    Constructs a Simple Genetic Algorithm with Gray binary encoding (generational)
+
+    USAGE: algorithm.sga_gray(self, gen=1, cr=.95, m=.02, elitism=1, mutation=sga_gray.mutation.UNIFORM, selection=sga_gray.selection.ROULETTE, crossover=sga_gray.crossover.SINGLE_POINT)
+
+    * gen: Number of generations to evolve.
+    * cr: crossover factor in [0,1]
+    * m: mutation probability (of each encoded bit) [0,1]
+    * elitism: number of generations after which the best is reinserted
+    * mutation: mutation type (one of [UNIFORM])
+    * selection: selection strategy (one of [ROULETTE, BEST20])
+    * crossover: crossover strategy (one of [SINGLE_POINT])
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(cr)
+    arg_list.append(m)
+    arg_list.append(elitism)
+    arg_list.append(mutation)
+    arg_list.append(selection)
+    arg_list.append(crossover)
+    self._orig_init(*arg_list)
sga_gray._orig_init = sga_gray.__init__
sga_gray.__init__ = _sga_gray_ctor
-def _nsga_II_ctor(self, gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 10):
-    """
-    Constructs a Non-dominated Sorting Genetic Algorithm (NSGA_II)
-
-    USAGE: algorithm.nsga_II(self, gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 10)
-
-    * gen: number of generations
-    * cr: crossover factor [0,1[
-    * eta_c: Distribution index for crossover
-    * m: mutation probability [0,1]
-    * eta_m: Distribution index for mutation
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(cr)
-    arg_list.append(eta_c)
-    arg_list.append(m)
-    arg_list.append(eta_m)
-    self._orig_init(*arg_list)
+
+def _nsga_II_ctor(self, gen=100, cr=0.95, eta_c=10, m=0.01, eta_m=10):
+    """
+    Constructs a Non-dominated Sorting Genetic Algorithm (NSGA_II)
+
+    USAGE: algorithm.nsga_II(self, gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 10)
+
+    * gen: number of generations
+    * cr: crossover factor [0,1[
+    * eta_c: Distribution index for crossover
+    * m: mutation probability [0,1]
+    * eta_m: Distribution index for mutation
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(cr)
+    arg_list.append(eta_c)
+    arg_list.append(m)
+    arg_list.append(eta_m)
+    self._orig_init(*arg_list)
nsga_II._orig_init = nsga_II.__init__
nsga_II.__init__ = _nsga_II_ctor
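# Usage sketch (illustrative, not part of this patch): typical NSGA-II settings
# for a multi-objective run; values are arbitrary.
from PyGMO import algorithm
algo = algorithm.nsga_II(gen=250, cr=0.9, eta_c=15, m=0.02, eta_m=20)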
-def _sms_emoa_ctor(self, hv_algorithm = None, gen=100, sel_m = 2, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 10):
-    """
-    Constructs a S-Metric Selection Evolutionary Multiobjective Optimiser Algorithm (SMS-EMOA)
-
-    USAGE: algorithm.sms_emoa(self, gen=100, sel_m = 2, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 10)
-
-    * hv_algorithm: hypervolume algorithm object used for the computation of the hypervolume. By default its chosen dynamically
-    * gen: number of generations
-    * sel_m: selection method for points in dominated fronts. 1 - always use least contributor, 2 - use domination count for fronts > 1
-    * cr: crossover factor [0,1]
-    * eta_c: Distribution index for crossover
-    * m: mutation probability [0,1]
-    * eta_m: Distribution index for mutation
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-
-    from ..util import HypervolumeValidation
-    if hv_algorithm:
-        hv_algorithm = HypervolumeValidation.validate_hv_algorithm(hv_algorithm)
-    arg_list.append(hv_algorithm)
-    arg_list.append(gen)
-    arg_list.append(sel_m)
-    arg_list.append(cr)
-    arg_list.append(eta_c)
-    arg_list.append(m)
-    arg_list.append(eta_m)
-    self._orig_init(*arg_list)
+
+def _sms_emoa_ctor(
+        self,
+        hv_algorithm=None,
+        gen=100,
+        sel_m=2,
+        cr=0.95,
+        eta_c=10,
+        m=0.01,
+        eta_m=10):
+    """
+    Constructs an S-Metric Selection Evolutionary Multiobjective Optimiser Algorithm (SMS-EMOA)
+
+    USAGE: algorithm.sms_emoa(self, gen=100, sel_m = 2, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 10)
+
+    * hv_algorithm: hypervolume algorithm object used for the computation of the hypervolume. By default it is chosen dynamically
+    * gen: number of generations
+    * sel_m: selection method for points in dominated fronts. 1 - always use least contributor, 2 - use domination count for fronts > 1
+    * cr: crossover factor [0,1]
+    * eta_c: Distribution index for crossover
+    * m: mutation probability [0,1]
+    * eta_m: Distribution index for mutation
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+
+    from PyGMO.util import HypervolumeValidation
+    if hv_algorithm:
+        hv_algorithm = HypervolumeValidation.validate_hv_algorithm(
+            hv_algorithm)
+    arg_list.append(hv_algorithm)
+    arg_list.append(gen)
+    arg_list.append(sel_m)
+    arg_list.append(cr)
+    arg_list.append(eta_c)
+    arg_list.append(m)
+    arg_list.append(eta_m)
+    self._orig_init(*arg_list)
sms_emoa._orig_init = sms_emoa.__init__
sms_emoa.__init__ = _sms_emoa_ctor
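# Usage sketch (illustrative, not part of this patch): leaving hv_algorithm as
# None lets the hypervolume backend be chosen dynamically, as documented above.
from PyGMO import algorithm
algo = algorithm.sms_emoa(gen=200, sel_m=2, cr=0.9)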
-_algorithm.pade.RANDOM = _algorithm._weight_generation.RANDOM
-_algorithm.pade.GRID = _algorithm._weight_generation.GRID
-_algorithm.pade.LOW_DISCREPANCY = _algorithm._weight_generation.LOW_DISCREPANCY
-from PyGMO.problem import decompose
-def _pade_ctor(self, gen=10, max_parallelism = 1, decomposition = decompose.BI, solver = None, T = 8, weights = pade.LOW_DISCREPANCY, z = []):
-    """
-    Constructs a Parallel Decomposition Algorithm (PaDe).
-
-    For each element of the population a different single objective problem is generated using a decomposition method.
-    Those single-objective problems are thus solved in an island model.
-    At the end of the evolution the population is set as the best individual in each single-objective island.
-    This algorithm, original with PaGMO, builds upon the MOEA/D framework
-
-    USAGE: algorithm.pade(self, gen=10, max_parallelism = 1, decomposition = decompose.WEIGHTED, solver = jde(100), T = 8, weights = pade.RANDOM, z = None)
-
-    * gen: number of generations
-    * max_parallelism: the maximum number of single-objective problems to solve at the same time
-    * solver: the algorithm to use to solve the single-objective problems
-    * T: the size of the population on each subproblem (must be an even number)
-    * decomposition = the decomposition method to use (Weighted, Tchebycheff or BI)
-    * weights: the weight generation method
-    * z: the reference point (used with Tchebycheff and BI decomposition methods)
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(max_parallelism)
-    arg_list.append(decomposition)
-    if solver==None:
-        solver=jde(100)
-    arg_list.append(solver)
-    arg_list.append(T)
-    arg_list.append(weights)
-    arg_list.append(z)
-    self._orig_init(*arg_list)
+
+def _pade_ctor(
+        self,
+        gen=10,
+        decomposition='tchebycheff',
+        weights='grid',
+        solver=None,
+        threads=8,
+        T=8,
+        z=[]):
+    """
+    Constructs a Parallel Decomposition Algorithm (PaDe).
+
+    For each element of the population a different single objective problem is generated using a decomposition method.
+    Those single-objective problems are thus solved in an island model.
+    At the end of the evolution the population is set as the best individual in each single-objective island.
+    This algorithm, original to PaGMO, builds upon the MOEA/D framework
+
+    USAGE: algorithm.pade(self, gen=10, decomposition = 'tchebycheff', weights = 'grid', solver = None, threads = 8, T = 8, z = [])
+
+    * gen: number of generations
+    * threads: the maximum number of single-objective problems to solve at the same time
+    * solver: the algorithm to use to solve the single-objective problems
+    * T: the size of the population on each subproblem (must be an even number)
+    * decomposition: the decomposition method to use, one of ('weighted', 'tchebycheff' or 'bi')
+    * weights: weight generation method, one of ('grid', 'low_discrepancy', 'random')
+    * z: the reference point (used with Tchebycheff and BI decomposition methods)
+    """
+    # We set the defaults or the kwargs
+    from PyGMO.problem._problem import _decomposition_method
+
+    def decomposition_type(x):
+        return {
+            'weighted': _decomposition_method.WEIGHTED,
+            'tchebycheff': _decomposition_method.TCHEBYCHEFF,
+            'bi': _decomposition_method.BI,
+        }[x]
+
+    def weight_generation_type(x):
+        return {
+            'low_discrepancy': _algorithm._weight_generation.LOW_DISCREPANCY,
+            'grid': _algorithm._weight_generation.GRID,
+            'random': _algorithm._weight_generation.RANDOM,
+        }[x]
+
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(threads)
+    arg_list.append(decomposition_type(decomposition.lower()))
+    if solver is None:
+        solver = jde(100)
+    arg_list.append(solver)
+    arg_list.append(T)
+    arg_list.append(weight_generation_type(weights.lower()))
+    arg_list.append(z)
+    self._orig_init(*arg_list)
pade._orig_init = pade.__init__
pade.__init__ = _pade_ctor
-del decompose
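# Usage sketch (illustrative, not part of this patch): decomposition and weight
# generation are now plain strings, mapped to the C++ enums by the helpers above.
from PyGMO import algorithm
algo = algorithm.pade(gen=20, decomposition='bi', weights='random',
                      solver=algorithm.jde(50), T=8)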
- """ - Constructs a Multi Objective PSO - - USAGE: algorithm.nspso(self, gen=10, minW = 0.4, maxW = 1.0, C1 = 2.0, C2 = 2.0, - CHI = 1.0, v_coeff = 0.5, leader_selection = 5, diversity_mechanism = nspso.CROWDING_DISTANCE): - - * gen: number of generations - * minW: minimum particles' inertia weight (the inertia weight is decreased troughout the run between maxW and minW) - * maxW: maximum particles' inertia weight (the inertia weight is decreased troughout the run between maxW and minW) - * C1: magnitude of the force, applied to the particle's velocity, in the direction of its previous best position - * C2: magnitude of the force, applied to the particle's velocity, in the direction of its global best (leader) - * CHI: velocity scaling factor - * v_coeff: velocity coefficient (determining the maximum allowed particle velocity) - * leader_selection_range the leader of each particle is selected among the best leader_selection_range% individuals - * diversity_mechanism the diversity mechanism to use to mantein diversity on the pareto front - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(minW) - arg_list.append(maxW) - arg_list.append(C1) - arg_list.append(C2) - arg_list.append(CHI) - arg_list.append(v_coeff) - arg_list.append(leader_selection_range) - arg_list.append(diversity_mechanism) - self._orig_init(*arg_list) + + +def _nspso_ctor( + self, + gen=100, + minW=0.4, + maxW=1.0, + C1=2.0, + C2=2.0, + CHI=1.0, + v_coeff=0.5, + leader_selection_range=5, + diversity_mechanism=nspso.CROWDING_DISTANCE): + """ + Constructs a Multi Objective PSO + + USAGE: algorithm.nspso(self, gen=10, minW = 0.4, maxW = 1.0, C1 = 2.0, C2 = 2.0, + CHI = 1.0, v_coeff = 0.5, leader_selection = 5, diversity_mechanism = nspso.CROWDING_DISTANCE): + + * gen: number of generations + * minW: minimum particles' inertia weight (the inertia weight is decreased troughout the run between maxW and minW) + * maxW: maximum particles' inertia weight (the inertia weight is decreased troughout the run between maxW and minW) + * C1: magnitude of the force, applied to the particle's velocity, in the direction of its previous best position + * C2: magnitude of the force, applied to the particle's velocity, in the direction of its global best (leader) + * CHI: velocity scaling factor + * v_coeff: velocity coefficient (determining the maximum allowed particle velocity) + * leader_selection_range the leader of each particle is selected among the best leader_selection_range% individuals + * diversity_mechanism the diversity mechanism to use to mantein diversity on the pareto front + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(minW) + arg_list.append(maxW) + arg_list.append(C1) + arg_list.append(C2) + arg_list.append(CHI) + arg_list.append(v_coeff) + arg_list.append(leader_selection_range) + arg_list.append(diversity_mechanism) + self._orig_init(*arg_list) nspso._orig_init = nspso.__init__ nspso.__init__ = _nspso_ctor -def _spea2_ctor(self, gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 50, archive_size = 0): - """ - Constructs a Strenght Pareto Evolutionary Algorithm 2 - - USAGE: algorithm.spea2(gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 50, archive_size = -1) - - * gen: Number of generations to evolve. - * cr: Crossover probability - * eta_c: Distribution index for crossover - * m: Mutation probability - * eta_m: Distribution index for mutation - * archive_size: the size of the non_dominated archive. 
-def _spea2_ctor(self, gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 50, archive_size = 0):
-    """
-    Constructs a Strenght Pareto Evolutionary Algorithm 2
-
-    USAGE: algorithm.spea2(gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 50, archive_size = -1)
-
-    * gen: Number of generations to evolve.
-    * cr: Crossover probability
-    * eta_c: Distribution index for crossover
-    * m: Mutation probability
-    * eta_m: Distribution index for mutation
-    * archive_size: the size of the non_dominated archive. If archive_size=0 then the archive size is set equal to the population size. The population returned after evolve has a size equal to archive_size
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(cr)
-    arg_list.append(eta_c)
-    arg_list.append(m)
-    arg_list.append(eta_m)
-    arg_list.append(archive_size)
-    self._orig_init(*arg_list)
+
+def _spea2_ctor(
+        self,
+        gen=100,
+        cr=0.95,
+        eta_c=10,
+        m=0.01,
+        eta_m=50,
+        archive_size=0):
+    """
+    Constructs a Strength Pareto Evolutionary Algorithm 2
+
+    USAGE: algorithm.spea2(gen=100, cr = 0.95, eta_c = 10, m = 0.01, eta_m = 50, archive_size = 0)
+
+    * gen: Number of generations to evolve.
+    * cr: Crossover probability
+    * eta_c: Distribution index for crossover
+    * m: Mutation probability
+    * eta_m: Distribution index for mutation
+    * archive_size: the size of the non_dominated archive. If archive_size=0 then the archive size is set equal to the population size. The population returned after evolve has a size equal to archive_size
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(cr)
+    arg_list.append(eta_c)
+    arg_list.append(m)
+    arg_list.append(eta_m)
+    arg_list.append(archive_size)
+    self._orig_init(*arg_list)
spea2._orig_init = spea2.__init__
spea2.__init__ = _spea2_ctor
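# Usage sketch (illustrative, not part of this patch): archive_size=0 ties the
# archive to the population size, per the docstring above.
from PyGMO import algorithm
algo = algorithm.spea2(gen=150, cr=0.9, m=0.02, archive_size=0)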
-def _sa_corana_ctor(self, iter = 10000, Ts = 10, Tf = .1, steps = 1, bin_size = 20, range = 1):
-    """
-    Constructs Corana's Simulated Annealing
-
-    USAGE: algorithm.sa_corana(iter = 10000, Ts = 10, Tf = .1, steps = 1, bin_size = 20, range = 1)
-
-    NOTE: as this version of simulated annealing loops through the chromosome, the iter number needs to be selected
-    large enough to allow the temperature schedule to actuallt make sense. For example if your problem has D dimensions
-    then in order to have at least N temperature adjustments (from Ts to Tf) one should select iter = D * N * steps * bin_size.
-
-    * iter: number of total iterations
-    * Ts: starting temperature
-    * Tf: final temperature ( > Ts)
-    * steps: number of steps adjustments
-    * bin_size: size of the bin used to evaluate the step adjustment
-    * range: initial size of the neighbourhood (in [0,1])
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(iter)
-    arg_list.append(Ts)
-    arg_list.append(Tf)
-    arg_list.append(steps)
-    arg_list.append(bin_size)
-    arg_list.append(range)
-    self._orig_init(*arg_list)
+from PyGMO.problem import decompose
+
+
+def _moead_ctor(
+        self,
+        gen=100,
+        weights='grid',
+        T=20,
+        realb=0.9,
+        limit=2,
+        cr=1.0,
+        f=0.5,
+        eta_m=20,
+        diversity=True):
+    """
+    Multi Objective Evolutionary Algorithm based on Decomposition and Differential Evolution (MOEA/D - DE)
+
+    REF Zhang, Qingfu, and Hui Li. "MOEA/D: A multiobjective evolutionary algorithm based on decomposition." Evolutionary Computation, IEEE Transactions on 11.6 (2007): 712-731.
+    REF Li, Hui, and Qingfu Zhang. "Multiobjective optimization problems with complicated Pareto sets, MOEA/D and NSGA-II." Evolutionary Computation, IEEE Transactions on 13.2 (2009): 284-302.
+
+    USAGE: algorithm.moead(gen=100, weights = 'grid', T = 20, realb = 0.9, limit = 2, cr = 1.0, f = 0.5, eta_m=20, diversity=True)
+
+    * gen: Number of generations to evolve.
+    * weights: weight generation method, one of ('grid', 'low_discrepancy', 'random')
+    * T: Size of the neighbourhood
+    * realb: Chance that the neighbourhood is T rather than the whole population (only if diversity is True)
+    * limit: Maximum number of copies reinserted in the population (only if diversity is True)
+    * cr: Crossover parameter in the Differential Evolution operator
+    * f: the f parameter in the Differential Evolution operator
+    * eta_m: Distribution index for the polynomial mutation
+    * diversity: when true, activates the two diversity preservation mechanisms described in the Li, Hui, and Qingfu Zhang paper
+    """
+    def weight_generation_type(x):
+        return {
+            'low_discrepancy': _algorithm._weight_generation_moead.LOW_DISCREPANCY,
+            'grid': _algorithm._weight_generation_moead.GRID,
+            'random': _algorithm._weight_generation_moead.RANDOM,
+        }[x]
+
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(weight_generation_type(weights))
+    arg_list.append(T)
+    arg_list.append(realb)
+    arg_list.append(limit)
+    arg_list.append(cr)
+    arg_list.append(f)
+    arg_list.append(eta_m)
+    arg_list.append(diversity)
+    self._orig_init(*arg_list)
+moead._orig_init = moead.__init__
+moead.__init__ = _moead_ctor
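# Usage sketch (illustrative, not part of this patch): MOEA/D-DE with
# low-discrepancy weights and the diversity preservation mechanisms enabled.
from PyGMO import algorithm
algo = algorithm.moead(gen=300, weights='low_discrepancy', T=20, diversity=True)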
+
+
+def _sa_corana_ctor(
+        self,
+        iter=10000,
+        Ts=10,
+        Tf=.1,
+        steps=1,
+        bin_size=20,
+        range=1):
+    """
+    Constructs Corana's Simulated Annealing
+
+    USAGE: algorithm.sa_corana(iter = 10000, Ts = 10, Tf = .1, steps = 1, bin_size = 20, range = 1)
+
+    NOTE: as this version of simulated annealing loops through the chromosome, the iter number needs to be selected
+    large enough to allow the temperature schedule to actually make sense. For example, if your problem has D dimensions
+    then in order to have at least N temperature adjustments (from Ts to Tf) one should select iter = D * N * steps * bin_size.
+
+    * iter: number of total iterations
+    * Ts: starting temperature
+    * Tf: final temperature ( < Ts)
+    * steps: number of step adjustments
+    * bin_size: size of the bin used to evaluate the step adjustment
+    * range: initial size of the neighbourhood (in [0,1])
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(iter)
+    arg_list.append(Ts)
+    arg_list.append(Tf)
+    arg_list.append(steps)
+    arg_list.append(bin_size)
+    arg_list.append(range)
+    self._orig_init(*arg_list)
sa_corana._orig_init = sa_corana.__init__
sa_corana.__init__ = _sa_corana_ctor
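# Usage sketch (illustrative, not part of this patch): sizing iter per the NOTE
# above -- with D = 10 dimensions, N = 20 temperature adjustments, steps = 1 and
# bin_size = 20, iter = 10 * 20 * 1 * 20 = 4000.
from PyGMO import algorithm
algo = algorithm.sa_corana(iter=4000, Ts=10, Tf=0.1, steps=1, bin_size=20)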
-def _bee_colony_ctor(self, gen = 100, limit = 20):
-    """
-    Constructs an Artificial Bee Colony Algorithm
-
-    USAGE: algorithm.bee_colony(gen = 100, limit = 20)
-
-    * gen: number of 'generations' (each generation 2*NP function evaluations
-      are made where NP is the population size)
-    * limit: number of tries after which a source of food is dropped if not improved
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    arg_list.append(limit)
-    self._orig_init(*arg_list)
+
+def _bee_colony_ctor(self, gen=100, limit=20):
+    """
+    Constructs an Artificial Bee Colony Algorithm
+
+    USAGE: algorithm.bee_colony(gen = 100, limit = 20)
+
+    * gen: number of 'generations' (each generation 2*NP function evaluations
+      are made where NP is the population size)
+    * limit: number of tries after which a source of food is dropped if not improved
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    arg_list.append(limit)
+    self._orig_init(*arg_list)
bee_colony._orig_init = bee_colony.__init__
bee_colony.__init__ = _bee_colony_ctor
-def _sea_ctor(self, gen = 100, limit = 20):
-    """
-    Constructs a simple (N+1)-EA: A Simple Evolutionary Algorithm
-
-    USAGE: algorithm.ea(gen = 1)
-    SEE : Oliveto, Pietro S., Jun He, and Xin Yao.
-    "Time complexity of evolutionary algorithms for combinatorial optimization: A decade of results."
-    International Journal of Automation and Computing 4.3 (2007): 281-293.
-
-    * gen: number of 'generations' (each generation is one function evaluation)
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(gen)
-    self._orig_init(*arg_list)
+
+def _sea_ctor(self, gen=100, limit=20):
+    """
+    Constructs a simple (N+1)-EA: A Simple Evolutionary Algorithm
+
+    USAGE: algorithm.sea(gen = 100)
+    SEE : Oliveto, Pietro S., Jun He, and Xin Yao.
+    "Time complexity of evolutionary algorithms for combinatorial optimization: A decade of results."
+    International Journal of Automation and Computing 4.3 (2007): 281-293.
+
+    * gen: number of 'generations' (each generation is one function evaluation)
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(gen)
+    self._orig_init(*arg_list)
sea._orig_init = sea.__init__
sea.__init__ = _sea_ctor
-#def _firefly_ctor(self,**kwargs):
+# def _firefly_ctor(self,**kwargs):
# """
# Constructs a Firefly Algorithm
-#
+#
# USAGE: algorithm.firefly(gen = 1, alpha = 0.01, beta = 1.0, gamma = 0.8)
-#
-# * gen: number of 'generations'
+#
+# * gen: number of 'generations'
# * alpha: width of the random vector (in [0,1])
# * beta: maximum attractiveness (in [0,1])
# * gamma: absorption coefficient (in [0,1])
# """
-# # We set the defaults or the kwargs
+# We set the defaults or the kwargs
# arg_list=[]
# arg_list.append(kwargs.pop('gen', 1))
# arg_list.append(kwargs.pop('alpha', 20))
@@ -661,148 +890,175 @@ def _sea_ctor(self, gen = 100, limit = 20):
#firefly._orig_init = firefly.__init__
#firefly.__init__ = _firefly_ctor
-def _ms_ctor(self, algorithm = None, iter = 1):
-    """
-    Constructs a Multistart Algorithm
-
-    USAGE: algorithm.ms(algorithm = algorithm.de(), iter = 1)
-
-    NOTE: starting from pop1, at each iteration a random pop2 is evolved
-    with the selected algorithm and its final best replaces the worst of pop1
-
-    * algorithm: PyGMO algorithm to be multistarted
-    * iter: number of multistarts
-
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    if algorithm == None:
-        algorithm = _algorithm.jde()
-    arg_list.append(algorithm)
-    arg_list.append(iter)
-    self._orig_init(*arg_list)
+
+def _ms_ctor(self, algorithm=None, iter=1):
+    """
+    Constructs a Multistart Algorithm
+
+    USAGE: algorithm.ms(algorithm = algorithm.de(), iter = 1)
+
+    NOTE: starting from pop1, at each iteration a random pop2 is evolved
+    with the selected algorithm and its final best replaces the worst of pop1
+
+    * algorithm: PyGMO algorithm to be multistarted
+    * iter: number of multistarts
+
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    if algorithm is None:
+        algorithm = _algorithm.jde()
+    arg_list.append(algorithm)
+    arg_list.append(iter)
+    self._orig_init(*arg_list)
ms._orig_init = ms.__init__
ms.__init__ = _ms_ctor
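# Usage sketch (illustrative, not part of this patch): multistarting jDE ten
# times; any PyGMO algorithm can be passed as the inner optimiser.
from PyGMO import algorithm
algo = algorithm.ms(algorithm=algorithm.jde(100), iter=10)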
-def _cs_ctor(self, max_eval = 1, stop_range = 0.01, start_range = 0.1, reduction_coeff = 0.5):
-    """
-    Constructs a Compass Search Algorithm
-
-    USAGE: algorithm.cs(max_eval = 1, stop_range = 0.01, start_range = 0.1, reduction_coeff = 0.5);
-
-
-    * max_eval: maximum number of function evaluations
-    * stop_range: when the range is reduced to a value smaller than stop_range cs stops
-    * start_range: starting range (non-dimensional wrt ub-lb)
-    * reduction_coeff: the range is multiplied by reduction_coeff whenever no improvment is made
-      across one chromosome
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(max_eval)
-    arg_list.append(stop_range)
-    arg_list.append(start_range)
-    arg_list.append(reduction_coeff)
-    self._orig_init(*arg_list)
+
+def _cs_ctor(
+        self,
+        max_eval=1,
+        stop_range=0.01,
+        start_range=0.1,
+        reduction_coeff=0.5):
+    """
+    Constructs a Compass Search Algorithm
+
+    USAGE: algorithm.cs(max_eval = 1, stop_range = 0.01, start_range = 0.1, reduction_coeff = 0.5);
+
+
+    * max_eval: maximum number of function evaluations
+    * stop_range: when the range is reduced to a value smaller than stop_range cs stops
+    * start_range: starting range (non-dimensional wrt ub-lb)
+    * reduction_coeff: the range is multiplied by reduction_coeff whenever no improvement is made
+      across one chromosome
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(max_eval)
+    arg_list.append(stop_range)
+    arg_list.append(start_range)
+    arg_list.append(reduction_coeff)
+    self._orig_init(*arg_list)
cs._orig_init = cs.__init__
cs.__init__ = _cs_ctor
-def _mbh_ctor(self, algorithm = None, stop = 5, perturb = 5e-2, screen_output = False):
-    """
-    Constructs a Monotonic Basin Hopping Algorithm (generalized to accept any algorithm)
-
-    USAGE: algorithm.mbh(algorithm = algorithm.cs(), stop = 5, perturb = 5e-2);
-
-    NOTE: Starting from pop, algorithm is applied to the perturbed pop returning pop2. If pop2 is better than
-    pop then pop=pop2 and a counter is reset to zero. If pop2 is not better the counter is incremented. If
-    the counter is larger than stop, optimization is terminated
-
-    * algorithm: 'local' optimiser
-    * stop: number of no improvements before halting the optimization
-    * perturb: non-dimentional perturbation width (can be a list, in which case
-      it has to have the same dimension of the problem mbh will be applied to)
-    * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with
-    * different island outputs)
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    if algorithm==None:
-        algorithm=_algorithm.cs()
-    arg_list.append(algorithm)
-    arg_list.append(stop)
-    arg_list.append(perturb)
-    self._orig_init(*arg_list)
-    self.screen_output = screen_output
+
+def _mbh_ctor(self, algorithm=None, stop=5, perturb=5e-2, screen_output=False):
+    """
+    Constructs a Monotonic Basin Hopping Algorithm (generalized to accept any algorithm)
+
+    USAGE: algorithm.mbh(algorithm = algorithm.cs(), stop = 5, perturb = 5e-2);
+
+    NOTE: Starting from pop, algorithm is applied to the perturbed pop returning pop2. If pop2 is better than
+    pop then pop=pop2 and a counter is reset to zero. If pop2 is not better the counter is incremented. If
+    the counter is larger than stop, optimization is terminated
+
+    * algorithm: 'local' optimiser
+    * stop: number of no improvements before halting the optimization
+    * perturb: non-dimensional perturbation width (can be a list, in which case
+      it has to have the same dimension of the problem mbh will be applied to)
+    * screen_output: activates screen output of the algorithm (do not use in an archipelago, otherwise the screen will be flooded with
+      different island outputs)
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    if algorithm is None:
+        algorithm = _algorithm.cs()
+    arg_list.append(algorithm)
+    arg_list.append(stop)
+    arg_list.append(perturb)
+    self._orig_init(*arg_list)
+    self.screen_output = screen_output
mbh._orig_init = mbh.__init__
mbh.__init__ = _mbh_ctor
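# Usage sketch (illustrative, not part of this patch): basin hopping around a
# compass-search local optimiser, stopping after 10 non-improving perturbations.
from PyGMO import algorithm
algo = algorithm.mbh(algorithm=algorithm.cs(max_eval=500), stop=10, perturb=0.05)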
- """ - # We set the defaults or the kwargs - arg_list=[] - if algorithm == None: - algorithm = _algorithm.jde() - arg_list.append(algorithm) - arg_list.append(max_iter) - arg_list.append(f_tol) - arg_list.append(x_tol) - self._orig_init(*arg_list) + +def _cstrs_self_adaptive_ctor( + self, + algorithm=None, + max_iter=100, + f_tol=1e-15, + x_tol=1e-15): + """ + Constructs a Self-Adaptive Fitness constraints handling Meta Algorithm. + + The key idea of this constraint handling technique is to represent the + constraint violation by a single infeasibility measure, and to adapt + dynamically the penalization of infeasible solutions. + + USAGE: algorithm.self_adaptive(algorithm = algorithm.jde(), max_iter = 100, f_tol = 1e-15, x_tol = 1e-15); + + * algorithm: original optimizer + * max_iter: stop-criteria (number of iterations) + * ftol: 1e-15 by default. The stopping criteria on the x tolerance. + * xtol: 1e-15 by default. The stopping criteria on the f tolerance. + """ + # We set the defaults or the kwargs + arg_list = [] + if algorithm is None: + algorithm = _algorithm.jde() + arg_list.append(algorithm) + arg_list.append(max_iter) + arg_list.append(f_tol) + arg_list.append(x_tol) + self._orig_init(*arg_list) cstrs_self_adaptive._orig_init = cstrs_self_adaptive.__init__ cstrs_self_adaptive.__init__ = _cstrs_self_adaptive_ctor # Renaming and placing the enums _algorithm.cstrs_co_evolution.method = _algorithm._co_evo_method_type -def _cstrs_co_evolution_ctor(self,original_algo = None,original_algo_penalties = None,pop_penalties_size = 30,gen = 20,method = cstrs_co_evolution.method.SIMPLE,pen_lower_bound = 0.,pen_upper_bound = 100000.,f_tol = 1e-15,x_tol = 1e-15): - """ - Constructs a co-evolution adaptive penalty algorithm for constrained optimization. - - USAGE: algorithm.cstrs_co_evolution(original_algo = _algorithm.jde(), original_algo_penalties = _algorithm.jde(), pop_penalties_size = 30, gen = 20, method = cstrs_co_evolution.method.SIMPLE, pen_lower_bound = 0, pen_upper_bound = 100000,f_tol = 1e-15,x_tol = 1e-15): - - * original_algo: optimizer to use as 'original' optimization method - * original_algo_penalties: optimizer to use as 'original' optimization method for population encoding penalties coefficients - * pop_penalties_size: size of the population encoding the penalty parameters. - * gen: number of generations. - * method: cstrs_co_evolution.method.SIMPLE by default, the method used for the population encoding penalties coefficients. - Three possibililties are available: SIMPLE, SPLIT_NEQ_EQ and SPLIT_CONSTRAINTS. - The simple one is the original version of the Coello/He implementation. The SPLIT_NEQ_EQ, - splits the equalities and inequalities constraints in two different sets for the - penalty weigths, containing respectively inequalities and equalities weigths. The - SPLIT_CONSTRAINTS splits the constraints in M set of weigths wehere M is the number of - constraints. - * pen_lower_bound: the lower boundary used for penalty. - * pen_upper_bound: the upper boundary used for penalty. - * ftol: 1e-15 by default. The stopping criteria on the x tolerance. - * xtol: 1e-15 by default. The stopping criteria on the f tolerance. 
- """ - arg_list=[] - if original_algo==None: - original_algo = algorithm.jde() - if original_algo_penalties==None: - original_algo_penalties = algorithm.jde() - arg_list.append(original_algo) - arg_list.append(original_algo_penalties) - arg_list.append(pop_penalties_size) - arg_list.append(gen) - arg_list.append(method) - arg_list.append(pen_lower_bound) - arg_list.append(pen_upper_bound) - arg_list.append(f_tol) - arg_list.append(x_tol) - self._orig_init(*arg_list) + +def _cstrs_co_evolution_ctor( + self, + original_algo=None, + original_algo_penalties=None, + pop_penalties_size=30, + gen=20, + method=cstrs_co_evolution.method.SIMPLE, + pen_lower_bound=0., + pen_upper_bound=100000., + f_tol=1e-15, + x_tol=1e-15): + """ + Constructs a co-evolution adaptive penalty algorithm for constrained optimization. + + USAGE: algorithm.cstrs_co_evolution(original_algo = _algorithm.jde(), original_algo_penalties = _algorithm.jde(), pop_penalties_size = 30, gen = 20, method = cstrs_co_evolution.method.SIMPLE, pen_lower_bound = 0, pen_upper_bound = 100000,f_tol = 1e-15,x_tol = 1e-15): + + * original_algo: optimizer to use as 'original' optimization method + * original_algo_penalties: optimizer to use as 'original' optimization method for population encoding penalties coefficients + * pop_penalties_size: size of the population encoding the penalty parameters. + * gen: number of generations. + * method: cstrs_co_evolution.method.SIMPLE by default, the method used for the population encoding penalties coefficients. + Three posssibililties are available: SIMPLE, + SPLIT_NEQ_EQ and SPLIT_CONSTRAINTS. The simple one is the original + version of the Coello/He implementation (one penalty coefficient weights + the sum of the constraints violation, one the number of violated constraints). + The SPLIT_NEQ_EQ, splits the equalities and inequalities constraints in two different sets for the + penalty weigths, containing respectively inequalities and equalities + weigths. The SPLIT_CONSTRAINTS splits the constraints in M set of weigths + with M the number of constraints. + * pen_lower_bound: the lower boundary used for penalty. + * pen_upper_bound: the upper boundary used for penalty. + * ftol: 1e-15 by default. The stopping criteria on the x tolerance. + * xtol: 1e-15 by default. The stopping criteria on the f tolerance. 
+ """ + arg_list = [] + if original_algo is None: + original_algo = algorithm.jde() + if original_algo_penalties is None: + original_algo_penalties = algorithm.jde() + arg_list.append(original_algo) + arg_list.append(original_algo_penalties) + arg_list.append(pop_penalties_size) + arg_list.append(gen) + arg_list.append(method) + arg_list.append(pen_lower_bound) + arg_list.append(pen_upper_bound) + arg_list.append(f_tol) + arg_list.append(x_tol) + self._orig_init(*arg_list) cstrs_co_evolution._orig_init = cstrs_co_evolution.__init__ cstrs_co_evolution.__init__ = _cstrs_co_evolution_ctor @@ -811,520 +1067,618 @@ def _cstrs_co_evolution_ctor(self,original_algo = None,original_algo_penalties = _algorithm.cstrs_immune_system.inject_method = _algorithm._immune_inject_method_type _algorithm.cstrs_immune_system.distance_method = _algorithm._immune_distance_method_type -def _cstrs_immune_system_ctor(self,algorithm = None, algorithm_immune = None, gen = 1, select_method = cstrs_immune_system.select_method.BEST_ANTIBODY, inject_method = cstrs_immune_system.inject_method.CHAMPION, distance_method = cstrs_immune_system.distance_method.EUCLIDEAN, phi = 0.5, gamma = 0.5, sigma = 1./3., f_tol = 1e-15, x_tol = 1e-15): - """ - Constructs an immune system algorithm for constrained optimization. - - USAGE: algorithm._cstrs_immune_system(algorithm = _algorithm.jde(), algorithm_immune = _algorithm.jde(), gen = 1, select_method = cstrs_immune_system.select_method.BEST_ANTIBODY, inject_method = cstrs_immune_system.inject_method.CHAMPION, distance_method = cstrs_immune_system.distance_method.EUCLIDEAN, phi = 0.5, gamma = 0.5, sigma = 1./3., ftol = 1e-15, xtol = 1e-15): - - * algorithm: optimizer to use as 'original' optimization method. Its number of generations should be set to 1. - * algorithm_2: optimizer to use as 'original' optimization method for the evolution of the immune system. - * gen: number of generations. - * select_method: cstrs_immune_system.select_method.BEST_ANTIBODY by default, the method used for selecting the antibodies. - * inject_method: cstrs_immune_system.inject_method.CHAMPION by default, the method used for reinjecting the antibodies. - * distance_method: cstrs_immune_system.distance_method.EUCLIDEAN by default, the method used for computing the distance to the antigenes population. - * Two possibilities are available: CHAMPION, and BEST25. - * phi: 0.5 by default. The feasible fraction selection to compute the mean value. - * gamma: 0.5 by default. The number of antigens selected / number of total antigens. - * sigma: 1/3 by default. The number of antibodies / number of antigens. - * ftol: 1e-15 by default. The stopping criteria on the x tolerance. - * xtol: 1e-15 by default. The stopping criteria on the f tolerance. 
- """ - arg_list=[] - - if algorithm==None: - algorithm = algorithm.jde() - if algorithm_immune==None: - algorithm_immune = algorithm.jde() - arg_list.append(algorithm) - arg_list.append(algorithm_immune) - arg_list.append(gen) - arg_list.append(select_method) - arg_list.append(inject_method) - arg_list.append(distance_method) - arg_list.append(phi) - arg_list.append(gamma) - arg_list.append(sigma) - arg_list.append(f_tol) - arg_list.append(x_tol) - self._orig_init(*arg_list) + +def _cstrs_immune_system_ctor( + self, + algorithm=None, + algorithm_immune=None, + gen=1, + select_method=cstrs_immune_system.select_method.BEST_ANTIBODY, + inject_method=cstrs_immune_system.inject_method.CHAMPION, + distance_method=cstrs_immune_system.distance_method.EUCLIDEAN, + phi=0.5, + gamma=0.5, + sigma=1. / 3., + f_tol=1e-15, + x_tol=1e-15): + """ + Constructs an immune system algorithm for constrained optimization. + + USAGE: algorithm._cstrs_immune_system(algorithm = _algorithm.jde(), algorithm_immune = _algorithm.jde(), gen = 1, select_method = cstrs_immune_system.select_method.BEST_ANTIBODY, inject_method = cstrs_immune_system.inject_method.CHAMPION, distance_method = cstrs_immune_system.distance_method.EUCLIDEAN, phi = 0.5, gamma = 0.5, sigma = 1./3., ftol = 1e-15, xtol = 1e-15): + + * algorithm: optimizer to use as 'original' optimization method. Its number of generations should be set to 1. + * algorithm_2: optimizer to use as 'original' optimization method for the evolution of the immune system. + * gen: number of generations. + * select_method: cstrs_immune_system.select_method.BEST_ANTIBODY by default, the method used for selecting the antibodies. + * inject_method: cstrs_immune_system.inject_method.CHAMPION by default, the method used for reinjecting the antibodies. + * distance_method: cstrs_immune_system.distance_method.EUCLIDEAN by default, the method used for computing the distance to the antigenes population. + * Two possibilities are available: CHAMPION, and BEST25. + * phi: 0.5 by default. The feasible fraction selection to compute the mean value. + * gamma: 0.5 by default. The number of antigens selected / number of total antigens. + * sigma: 1/3 by default. The number of antibodies / number of antigens. + * ftol: 1e-15 by default. The stopping criteria on the x tolerance. + * xtol: 1e-15 by default. The stopping criteria on the f tolerance. + """ + arg_list = [] + + if algorithm is None: + algorithm = algorithm.jde() + if algorithm_immune is None: + algorithm_immune = algorithm.jde() + arg_list.append(algorithm) + arg_list.append(algorithm_immune) + arg_list.append(gen) + arg_list.append(select_method) + arg_list.append(inject_method) + arg_list.append(distance_method) + arg_list.append(phi) + arg_list.append(gamma) + arg_list.append(sigma) + arg_list.append(f_tol) + arg_list.append(x_tol) + self._orig_init(*arg_list) cstrs_immune_system._orig_init = cstrs_immune_system.__init__ cstrs_immune_system.__init__ = _cstrs_immune_system_ctor -def _cstrs_core_ctor(self,algorithm = None, repair_algorithm = None, gen = 1, repair_frequency = 10, repair_ratio = 1., f_tol = 1e-15, x_tol = 1e-15): - """ - Constructs CORE (Constrained Optimization by Random Evolution) algorithm for constrained optimization (belong to the family of repairing techniques). - - USAGE: algorithm._cstrs_core(algorithm = _algorithm.jde(), repair_algorithm = _algorithm.jde(), gen = 1, repair_frequency = 10, repair_ratio = 1., f_tol = 1e-15, x_tol = 1e-15): - - * algorithm: optimizer to use as 'original' optimization method. 
-    * algorithm: optimizer to use as 'original' optimization method. Its number of generations should be set to 1.
-    * repair_algorithm: optimizer to use as 'repairing' algorithm. It should be able to deal with population of size 1.
-    * gen: number of generations.
-    * repair_frequency: The infeasible are repaired at each repair frequency generations.
-    * repair_ratio: ratio of repaired individuals over infeasible (a ratio of 1 will repair all the individuals).
-    * ftol: 1e-15 by default. The stopping criteria on the x tolerance.
-    * xtol: 1e-15 by default. The stopping criteria on the f tolerance.
-    """
-    arg_list=[]
-    if algorithm==None:
-        algorithm = algorithm.jde()
-    if repair_algorithm==None:
-        repair_algorithm = algorithm.jde()
-    arg_list.append(algorithm)
-    arg_list.append(repair_algorithm)
-    arg_list.append(gen)
-    arg_list.append(repair_frequency)
-    arg_list.append(repair_ratio)
-    arg_list.append(f_tol)
-    arg_list.append(x_tol)
-    self._orig_init(*arg_list)
+
+def _cstrs_core_ctor(
+        self,
+        algorithm=None,
+        repair_algorithm=None,
+        gen=1,
+        repair_frequency=10,
+        repair_ratio=1.,
+        f_tol=1e-15,
+        x_tol=1e-15):
+    """
+    Constructs CORE (Constrained Optimization by Random Evolution) algorithm for constrained optimization (belongs to the family of repairing techniques).
+
+    USAGE: algorithm.cstrs_core(algorithm = _algorithm.jde(), repair_algorithm = _algorithm.jde(), gen = 1, repair_frequency = 10, repair_ratio = 1., f_tol = 1e-15, x_tol = 1e-15):
+
+    * algorithm: optimizer to use as 'original' optimization method. Its number of generations should be set to 1.
+    * repair_algorithm: optimizer to use as 'repairing' algorithm. It should be able to deal with population of size 1.
+    * gen: number of generations.
+    * repair_frequency: infeasible individuals are repaired every repair_frequency generations.
+    * repair_ratio: ratio of repaired individuals over infeasible (a ratio of 1 will repair all the individuals).
+    * f_tol: 1e-15 by default. The stopping criterion on the f tolerance.
+    * x_tol: 1e-15 by default. The stopping criterion on the x tolerance.
+ """ + arg_list = [] + if algorithm is None: + algorithm = algorithm.jde() + if repair_algorithm is None: + repair_algorithm = algorithm.jde() + arg_list.append(algorithm) + arg_list.append(repair_algorithm) + arg_list.append(gen) + arg_list.append(repair_frequency) + arg_list.append(repair_ratio) + arg_list.append(f_tol) + arg_list.append(x_tol) + self._orig_init(*arg_list) cstrs_core._orig_init = cstrs_core.__init__ cstrs_core.__init__ = _cstrs_core_ctor -def _ihs_ctor(self, iter = 100, hmcr = 0.85, par_min = 0.35, par_max = 0.99, bw_min = 1E-5, bw_max = 1): - """ - Constructs an Improved Harmony Search Algorithm - - USAGE: algorithm.ihs(iter = 100, hmcr = 0.85, par_min = 0.35, par_max = 0.99, bw_min = 1E-5, bw_max = 1); - - * iter: number of iterations (improvisations) - * hmcr: rate of choosing from memory (in ]0,1[) - * par_min: minimum pitch adjustment rate (in ]0,1[) - * par_max: maximum pitch adjustment rate (in ]0,1[, > par_min) - * bw_min: minimum distance bandwidth - * bw_max: maximum distance bandwidth (> bw_min) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(iter) - arg_list.append(hmcr) - arg_list.append(par_min) - arg_list.append(par_max) - arg_list.append(bw_min) - arg_list.append(bw_max) - self._orig_init(*arg_list) + +def _ihs_ctor( + self, + iter=100, + hmcr=0.85, + par_min=0.35, + par_max=0.99, + bw_min=1E-5, + bw_max=1): + """ + Constructs an Improved Harmony Search Algorithm + + USAGE: algorithm.ihs(iter = 100, hmcr = 0.85, par_min = 0.35, par_max = 0.99, bw_min = 1E-5, bw_max = 1); + + * iter: number of iterations (improvisations) + * hmcr: rate of choosing from memory (in ]0,1[) + * par_min: minimum pitch adjustment rate (in ]0,1[) + * par_max: maximum pitch adjustment rate (in ]0,1[, > par_min) + * bw_min: minimum distance bandwidth + * bw_max: maximum distance bandwidth (> bw_min) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(iter) + arg_list.append(hmcr) + arg_list.append(par_min) + arg_list.append(par_max) + arg_list.append(bw_min) + arg_list.append(bw_max) + self._orig_init(*arg_list) ihs._orig_init = ihs.__init__ ihs.__init__ = _ihs_ctor -def _cmaes_ctor(self, gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False): - """ - Constructs a Covariance Matrix Adaptation Evolutionary Strategy (C++) - - USAGE: algorithm.cmaes(gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False) - - NOTE: In our variant of the algorithm, particle memory is used to extract the elite and reinsertion - is made aggressively ..... getting rid of the worst guy). Also, the bounds of the problem - are enforced, as to allow PaGMO machinery to work. 
Fine control on each iteration can be achieved - by calling the algo with memory=True and gen=1 - - * gen: number of generations - * cc: time constant for C cumulation (in [0,1]) if -1 automatic values are set - * cs: time constant for sigma cumulation (in [0,1]) if -1 automatic values are set - * c1: learning rate for rank-1 update (in [0,1]) if -1 automatic values are set - * cmu: learning rate for rank-mu update (in [0,1]) if -1 automatic values are set - * sigma0: starting step (std) - * xtol: stopping criteria on the x tolerance - * ftol: stopping criteria on the f tolerance - * memory: if True the algorithm internal state is saved and used for the next call - * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with - * different island outputs) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(gen) - arg_list.append(cc) - arg_list.append(cs) - arg_list.append(c1) - arg_list.append(cmu) - arg_list.append(sigma0) - arg_list.append(ftol) - arg_list.append(xtol) - arg_list.append(memory) - self._orig_init(*arg_list) - self.screen_output = screen_output + +def _cmaes_ctor( + self, + gen=500, + cc=-1, + cs=-1, + c1=-1, + cmu=-1, + sigma0=0.5, + ftol=1e-6, + xtol=1e-6, + memory=False, + screen_output=False): + """ + Constructs a Covariance Matrix Adaptation Evolutionary Strategy (C++) + + USAGE: algorithm.cmaes(gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False) + + NOTE: In our variant of the algorithm, particle memory is used to extract the elite and reinsertion + is made aggressively ..... getting rid of the worst guy). Also, the bounds of the problem + are enforced, as to allow PaGMO machinery to work. 
Fine control on each iteration can be achieved + by calling the algo with memory=True and gen=1 + + * gen: number of generations + * cc: time constant for C cumulation (in [0,1]) if -1 automatic values are set + * cs: time constant for sigma cumulation (in [0,1]) if -1 automatic values are set + * c1: learning rate for rank-1 update (in [0,1]) if -1 automatic values are set + * cmu: learning rate for rank-mu update (in [0,1]) if -1 automatic values are set + * sigma0: starting step (std) + * xtol: stopping criteria on the x tolerance + * ftol: stopping criteria on the f tolerance + * memory: if True the algorithm internal state is saved and used for the next call + * screen_output: activates screen output of the algorithm (do not use in archipealgo, otherwise the screen will be flooded with + * different island outputs) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(gen) + arg_list.append(cc) + arg_list.append(cs) + arg_list.append(c1) + arg_list.append(cmu) + arg_list.append(sigma0) + arg_list.append(ftol) + arg_list.append(xtol) + arg_list.append(memory) + self._orig_init(*arg_list) + self.screen_output = screen_output cmaes._orig_init = cmaes.__init__ cmaes.__init__ = _cmaes_ctor -def _monte_carlo_ctor(self, iter = 10000): - """ - Constructs a Monte Carlo Algorithm - - USAGE: algorithm.monte_carlo(iter = 10000) - - NOTE: At the end of each iteration, the randomly generated - point substitutes the worst in the population if better - - * iter: number of Monte Carlo runs - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(iter) - self._orig_init(*arg_list) + +def _monte_carlo_ctor(self, iter=10000): + """ + Constructs a Monte Carlo Algorithm + + USAGE: algorithm.monte_carlo(iter = 10000) + + NOTE: At the end of each iteration, the randomly generated + point substitutes the worst in the population if better + + * iter: number of Monte Carlo runs + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(iter) + self._orig_init(*arg_list) monte_carlo._orig_init = monte_carlo.__init__ monte_carlo.__init__ = _monte_carlo_ctor -#NLOPT algorithms (only if PyGMO has been compiled woth nlopt option activated) +# NLOPT algorithms (only if PyGMO has been compiled woth nlopt option +# activated) if "nlopt" in str(_get_algorithm_list()): - def _nlopt_bobyqa_ctor(self, max_iter = 100, ftol = 1e-6, xtol = 1e-6): - """ - Constructs a BOBYQA algorithm (Bound Optimization BY Quadratic Approximation) (NLOPT) - - USAGE: algorithm.nlopt_bobyqa(max_iter = 100, ftol = 1e-6, xtol = 1e-6) - - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - nlopt_bobyqa._orig_init = nlopt_bobyqa.__init__ - nlopt_bobyqa.__init__ = _nlopt_bobyqa_ctor - - def _nlopt_sbplx_ctor(self, max_iter = 100, ftol = 1e-6, xtol = 1e-6): - """ - Constructs a Subplex (a variant of Nelder-Mead that uses Nelder-Mead on a sequence of subspaces) (NLOPT) - - USAGE: algorithm.nlopt_sbplx(max_iter = 100, ftol = 1e-6, xtol = 1e-6) - - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - 
self._orig_init(*arg_list) - nlopt_sbplx._orig_init = nlopt_sbplx.__init__ - nlopt_sbplx.__init__ = _nlopt_sbplx_ctor - - def _nlopt_cobyla_ctor(self, max_iter = 100, ftol = 1e-6, xtol = 1e-6): - """ - Constructs a Constrained Optimization BY Linear Approximation (COBYLA) algorithm (NLOPT) - - USAGE: algorithm.nlopt_cobyla(max_iter = 100, ftol = 1e-6, xtol = 1e-6) - - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - nlopt_cobyla._orig_init = nlopt_cobyla.__init__ - nlopt_cobyla.__init__ = _nlopt_cobyla_ctor - - def _nlopt_mma_ctor(self, max_iter = 100, ftol = 1e-6, xtol = 1e-6): - """ - Constructs a Method of Moving Asymptotes (MMA) algorithm (NLOPT) - - USAGE: algorithm.nlopt_mma(max_iter = 100, ftol = 1e-6, xtol = 1e-6) - - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - nlopt_mma._orig_init = nlopt_mma.__init__ - nlopt_mma.__init__ = _nlopt_mma_ctor - - def _nlopt_auglag_ctor(self, aux_algo_id = 1, max_iter = 100, ftol = 1e-6, xtol = 1e-6, aux_max_iter = 100, aux_ftol = 1e-6, aux_xtol = 1e-6): - """ - Constructs an Augmented agrangian Algotihm (NLOPT) - - USAGE: algorithm.nlopt_mma(aux_algo_id = 1, max_iter = 100, ftol = 1e-6, xtol = 1e-6, aux_max_iter = 100, aux_ftol = 1e-6, aux_xtol = 1e-6) - - * aux_algo_id: auxiliary optimizer id - 1: SBPLX - 2: COBYLA - 3: BOBYQA - 4: Low Storage BFGS - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - * aux_max_iter: stop-criteria for the auxiliary optimizer (number of iterations) - * aux_ftol: stop-criteria for the auxiliary optimizer (absolute on the obj-fun) - * aux_xtol: stop-criteria for the auxiliary optimizer (absolute on the chromosome) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(aux_algo_id) - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - arg_list.append(aux_max_iter) - arg_list.append(aux_ftol) - arg_list.append(aux_xtol) - self._orig_init(*arg_list) - nlopt_auglag._orig_init = nlopt_auglag.__init__ - nlopt_auglag.__init__ = _nlopt_auglag_ctor - - def _nlopt_auglag_eq_ctor(self, aux_algo_id = 1, max_iter = 100, ftol = 1e-6, xtol = 1e-6, aux_max_iter = 100, aux_ftol = 1e-6, aux_xtol = 1e-6): - """ - Constructs an Augmented agrangian Algotihm (using penalties only for the equalities) (NLOPT) - - USAGE: algorithm.nlopt_auglag_eq(aux_algo_id = 1, max_iter = 100, ftol = 1e-6, xtol = 1e-6, aux_max_iter = 100, aux_ftol = 1e-6, aux_xtol = 1e-6) - - * aux_algo_id: auxiliary (local) optimizer id - 1: COBYLA - 2: MMA - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - * aux_max_iter: stop-criteria for the auxiliary optimizer (number of iterations) - * aux_ftol: stop-criteria for the auxiliary optimizer (absolute on the obj-fun) - * aux_xtol: stop-criteria for the auxiliary optimizer (absolute on the chromosome) - """ - # We set the defaults or the kwargs - 
arg_list=[] - arg_list.append(aux_algo_id) - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - arg_list.append(aux_max_iter) - arg_list.append(aux_ftol) - arg_list.append(aux_xtol) - self._orig_init(*arg_list) - nlopt_auglag_eq._orig_init = nlopt_auglag_eq.__init__ - nlopt_auglag_eq.__init__ = _nlopt_auglag_eq_ctor - - def _nlopt_slsqp_ctor(self, max_iter = 100, ftol = 1e-6, xtol = 1e-6): - """ - Constructs a Sequential Least SQuares Programming algorithm (SLSQP) algorithm (NLOPT) - - USAGE: algorithm.nlopt_slsqp(max_iter = 100, ftol = 1e-6, xtol = 1e-6) - - * max_iter: stop-criteria (number of iterations) - * ftol: stop-criteria (absolute on the obj-fun) - * xtol: stop-criteria (absolute on the chromosome) - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(ftol) - arg_list.append(xtol) - self._orig_init(*arg_list) - nlopt_slsqp._orig_init = nlopt_slsqp.__init__ - nlopt_slsqp.__init__ = _nlopt_slsqp_ctor - -#GSL algorithms (only if PyGMO has been compiled with gsl option activated) + def _nlopt_bobyqa_ctor(self, max_iter=100, ftol=1e-6, xtol=1e-6): + """ + Constructs a BOBYQA algorithm (Bound Optimization BY Quadratic Approximation) (NLOPT) + + USAGE: algorithm.nlopt_bobyqa(max_iter = 100, ftol = 1e-6, xtol = 1e-6) + + * max_iter: stop-criteria (number of iterations) + * ftol: stop-criteria (absolute on the obj-fun) + * xtol: stop-criteria (absolute on the chromosome) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(max_iter) + arg_list.append(ftol) + arg_list.append(xtol) + self._orig_init(*arg_list) + nlopt_bobyqa._orig_init = nlopt_bobyqa.__init__ + nlopt_bobyqa.__init__ = _nlopt_bobyqa_ctor + + def _nlopt_sbplx_ctor(self, max_iter=100, ftol=1e-6, xtol=1e-6): + """ + Constructs a Subplex (a variant of Nelder-Mead that uses Nelder-Mead on a sequence of subspaces) (NLOPT) + + USAGE: algorithm.nlopt_sbplx(max_iter = 100, ftol = 1e-6, xtol = 1e-6) + + * max_iter: stop-criteria (number of iterations) + * ftol: stop-criteria (absolute on the obj-fun) + * xtol: stop-criteria (absolute on the chromosome) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(max_iter) + arg_list.append(ftol) + arg_list.append(xtol) + self._orig_init(*arg_list) + nlopt_sbplx._orig_init = nlopt_sbplx.__init__ + nlopt_sbplx.__init__ = _nlopt_sbplx_ctor + + def _nlopt_cobyla_ctor(self, max_iter=100, ftol=1e-6, xtol=1e-6): + """ + Constructs a Constrained Optimization BY Linear Approximation (COBYLA) algorithm (NLOPT) + + USAGE: algorithm.nlopt_cobyla(max_iter = 100, ftol = 1e-6, xtol = 1e-6) + + * max_iter: stop-criteria (number of iterations) + * ftol: stop-criteria (absolute on the obj-fun) + * xtol: stop-criteria (absolute on the chromosome) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(max_iter) + arg_list.append(ftol) + arg_list.append(xtol) + self._orig_init(*arg_list) + nlopt_cobyla._orig_init = nlopt_cobyla.__init__ + nlopt_cobyla.__init__ = _nlopt_cobyla_ctor + + def _nlopt_mma_ctor(self, max_iter=100, ftol=1e-6, xtol=1e-6): + """ + Constructs a Method of Moving Asymptotes (MMA) algorithm (NLOPT) + + USAGE: algorithm.nlopt_mma(max_iter = 100, ftol = 1e-6, xtol = 1e-6) + + * max_iter: stop-criteria (number of iterations) + * ftol: stop-criteria (absolute on the obj-fun) + * xtol: stop-criteria (absolute on the chromosome) + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(max_iter) + arg_list.append(ftol) + 
arg_list.append(xtol)
+        self._orig_init(*arg_list)
+    nlopt_mma._orig_init = nlopt_mma.__init__
+    nlopt_mma.__init__ = _nlopt_mma_ctor
+
+    def _nlopt_auglag_ctor(
+            self,
+            aux_algo_id=1,
+            max_iter=100,
+            ftol=1e-6,
+            xtol=1e-6,
+            aux_max_iter=100,
+            aux_ftol=1e-6,
+            aux_xtol=1e-6):
+        """
+        Constructs an Augmented Lagrangian Algorithm (NLOPT)
+
+        USAGE: algorithm.nlopt_auglag(aux_algo_id = 1, max_iter = 100, ftol = 1e-6, xtol = 1e-6, aux_max_iter = 100, aux_ftol = 1e-6, aux_xtol = 1e-6)
+
+        * aux_algo_id: auxiliary optimizer id
+            1: SBPLX
+            2: COBYLA
+            3: BOBYQA
+            4: Low Storage BFGS
+        * max_iter: stop-criteria (number of iterations)
+        * ftol: stop-criteria (absolute on the obj-fun)
+        * xtol: stop-criteria (absolute on the chromosome)
+        * aux_max_iter: stop-criteria for the auxiliary optimizer (number of iterations)
+        * aux_ftol: stop-criteria for the auxiliary optimizer (absolute on the obj-fun)
+        * aux_xtol: stop-criteria for the auxiliary optimizer (absolute on the chromosome)
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(aux_algo_id)
+        arg_list.append(max_iter)
+        arg_list.append(ftol)
+        arg_list.append(xtol)
+        arg_list.append(aux_max_iter)
+        arg_list.append(aux_ftol)
+        arg_list.append(aux_xtol)
+        self._orig_init(*arg_list)
+    nlopt_auglag._orig_init = nlopt_auglag.__init__
+    nlopt_auglag.__init__ = _nlopt_auglag_ctor
+
+    def _nlopt_auglag_eq_ctor(
+            self,
+            aux_algo_id=1,
+            max_iter=100,
+            ftol=1e-6,
+            xtol=1e-6,
+            aux_max_iter=100,
+            aux_ftol=1e-6,
+            aux_xtol=1e-6):
+        """
+        Constructs an Augmented Lagrangian Algorithm (using penalties only for the equalities) (NLOPT)
+
+        USAGE: algorithm.nlopt_auglag_eq(aux_algo_id = 1, max_iter = 100, ftol = 1e-6, xtol = 1e-6, aux_max_iter = 100, aux_ftol = 1e-6, aux_xtol = 1e-6)
+
+        * aux_algo_id: auxiliary (local) optimizer id
+            1: COBYLA
+            2: MMA
+        * max_iter: stop-criteria (number of iterations)
+        * ftol: stop-criteria (absolute on the obj-fun)
+        * xtol: stop-criteria (absolute on the chromosome)
+        * aux_max_iter: stop-criteria for the auxiliary optimizer (number of iterations)
+        * aux_ftol: stop-criteria for the auxiliary optimizer (absolute on the obj-fun)
+        * aux_xtol: stop-criteria for the auxiliary optimizer (absolute on the chromosome)
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(aux_algo_id)
+        arg_list.append(max_iter)
+        arg_list.append(ftol)
+        arg_list.append(xtol)
+        arg_list.append(aux_max_iter)
+        arg_list.append(aux_ftol)
+        arg_list.append(aux_xtol)
+        self._orig_init(*arg_list)
+    nlopt_auglag_eq._orig_init = nlopt_auglag_eq.__init__
+    nlopt_auglag_eq.__init__ = _nlopt_auglag_eq_ctor
+
+    def _nlopt_slsqp_ctor(self, max_iter=100, ftol=1e-6, xtol=1e-6):
+        """
+        Constructs a Sequential Least SQuares Programming (SLSQP) algorithm (NLOPT)
+
+        USAGE: algorithm.nlopt_slsqp(max_iter = 100, ftol = 1e-6, xtol = 1e-6)
+
+        * max_iter: stop-criteria (number of iterations)
+        * ftol: stop-criteria (absolute on the obj-fun)
+        * xtol: stop-criteria (absolute on the chromosome)
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(ftol)
+        arg_list.append(xtol)
+        self._orig_init(*arg_list)
+    nlopt_slsqp._orig_init = nlopt_slsqp.__init__
+    nlopt_slsqp.__init__ = _nlopt_slsqp_ctor
+
+# GSL algorithms (only if PyGMO has been compiled with the gsl option activated)
 if "gsl" in str(_get_algorithm_list()):
-    def _gsl_bfgs_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001):
-        """
-        Constructs a BFGS
Algorithm (GSL) - - USAGE: algorithm.gsl_bfgs(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001) - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - * grad_step_size: step size for the numerical computation of the gradient. - * grad_tol: tolerance when testing the norm of the gradient as stopping criterion. - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(grad_tol) - arg_list.append(grad_step_size) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_bfgs._orig_init = gsl_bfgs.__init__ - gsl_bfgs.__init__ = _gsl_bfgs_ctor - - def _gsl_bfgs2_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001): - """ - Constructs a BFGS2 Algorithm (GSL) - - NOTE: in GSL, BFGS2 is a more efficient version of BFGS - - USAGE: algorithm.gsl_bfgs2(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001); - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - * grad_step_size: step size for the numerical computation of the gradient. - * grad_tol: tolerance when testing the norm of the gradient as stopping criterion. - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(grad_tol) - arg_list.append(grad_step_size) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_bfgs2._orig_init = gsl_bfgs2.__init__ - gsl_bfgs2.__init__ = _gsl_bfgs2_ctor - - def _gsl_fr_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001): - """ - Constructs a Fletcher-Reeves conjugate gradient (GSL) - - USAGE: algorithm.gsl_fr(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001) - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - * grad_step_size: step size for the numerical computation of the gradient. - * grad_tol: tolerance when testing the norm of the gradient as stopping criterion. - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(grad_tol) - arg_list.append(grad_step_size) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_fr._orig_init = gsl_fr.__init__ - gsl_fr.__init__ = _gsl_fr_ctor - - def _gsl_pr_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001): - """ - Constructs a Polak-Ribiere conjugate gradient (GSL) - - USAGE: algorithm.gsl_pr2(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001); - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - * grad_step_size: step size for the numerical computation of the gradient. - * grad_tol: tolerance when testing the norm of the gradient as stopping criterion. 
- """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(grad_tol) - arg_list.append(grad_step_size) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_pr._orig_init = gsl_pr.__init__ - gsl_pr.__init__ = _gsl_pr_ctor - - def _gsl_nm_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8): - """ - Constructs a Nelder-Mead Algorithm (GSL) - - USAGE: algorithm.gsl_nm(max_iter = 100, step_size = 1e-8, tol = 1e-8); - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_nm._orig_init = gsl_nm.__init__ - gsl_nm.__init__ = _gsl_nm_ctor - - def _gsl_nm2_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8): - """ - Constructs a Nelder-Mead algorithm (Variant2) (GSL) - - USAGE: algorithm.gsl_nm2(max_iter = 100, step_size = 1e-8, tol = 1e-8) - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_nm2._orig_init = gsl_nm2.__init__ - gsl_nm2.__init__ = _gsl_nm2_ctor - - def _gsl_nm2rand_ctor(self, max_iter = 100, step_size = 1e-8, tol = 1e-8): - """ - Constructs a Nelder-Mead algorithm (Variant2 + randomly oriented initial simplex) (GSL) - - USAGE: algorithm.gsl_nm2rand(max_iter = 100, step_size = 1e-8, tol = 1e-8); - - * max_iter: maximum number of iterations - * step_size: size of the first trial step. - * tol: accuracy of the line minimisation. - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(max_iter) - arg_list.append(tol) - arg_list.append(step_size) - self._orig_init(*arg_list) - gsl_nm2rand._orig_init = gsl_nm2rand.__init__ - gsl_nm2rand.__init__ = _gsl_nm2rand_ctor - -#IPOPT algorithm (only if PyGMO has been compiled with the ipopt option activated) + def _gsl_bfgs_ctor( + self, + max_iter=100, + step_size=1e-8, + tol=1e-8, + grad_step_size=0.01, + grad_tol=0.0001): + """ + Constructs a BFGS Algorithm (GSL) + + USAGE: algorithm.gsl_bfgs(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001) + + * max_iter: maximum number of iterations + * step_size: size of the first trial step. + * tol: accuracy of the line minimisation. + * grad_step_size: step size for the numerical computation of the gradient. + * grad_tol: tolerance when testing the norm of the gradient as stopping criterion. + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(max_iter) + arg_list.append(grad_tol) + arg_list.append(grad_step_size) + arg_list.append(tol) + arg_list.append(step_size) + self._orig_init(*arg_list) + gsl_bfgs._orig_init = gsl_bfgs.__init__ + gsl_bfgs.__init__ = _gsl_bfgs_ctor + + def _gsl_bfgs2_ctor( + self, + max_iter=100, + step_size=1e-8, + tol=1e-8, + grad_step_size=0.01, + grad_tol=0.0001): + """ + Constructs a BFGS2 Algorithm (GSL) + + NOTE: in GSL, BFGS2 is a more efficient version of BFGS + + USAGE: algorithm.gsl_bfgs2(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001); + + * max_iter: maximum number of iterations + * step_size: size of the first trial step. + * tol: accuracy of the line minimisation. 
+        * grad_step_size: step size for the numerical computation of the gradient.
+        * grad_tol: tolerance when testing the norm of the gradient as stopping criterion.
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(grad_tol)
+        arg_list.append(grad_step_size)
+        arg_list.append(tol)
+        arg_list.append(step_size)
+        self._orig_init(*arg_list)
+    gsl_bfgs2._orig_init = gsl_bfgs2.__init__
+    gsl_bfgs2.__init__ = _gsl_bfgs2_ctor
+
+    def _gsl_fr_ctor(
+            self,
+            max_iter=100,
+            step_size=1e-8,
+            tol=1e-8,
+            grad_step_size=0.01,
+            grad_tol=0.0001):
+        """
+        Constructs a Fletcher-Reeves conjugate gradient (GSL)
+
+        USAGE: algorithm.gsl_fr(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001)
+
+        * max_iter: maximum number of iterations
+        * step_size: size of the first trial step.
+        * tol: accuracy of the line minimisation.
+        * grad_step_size: step size for the numerical computation of the gradient.
+        * grad_tol: tolerance when testing the norm of the gradient as stopping criterion.
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(grad_tol)
+        arg_list.append(grad_step_size)
+        arg_list.append(tol)
+        arg_list.append(step_size)
+        self._orig_init(*arg_list)
+    gsl_fr._orig_init = gsl_fr.__init__
+    gsl_fr.__init__ = _gsl_fr_ctor
+
+    def _gsl_pr_ctor(
+            self,
+            max_iter=100,
+            step_size=1e-8,
+            tol=1e-8,
+            grad_step_size=0.01,
+            grad_tol=0.0001):
+        """
+        Constructs a Polak-Ribiere conjugate gradient (GSL)
+
+        USAGE: algorithm.gsl_pr(max_iter = 100, step_size = 1e-8, tol = 1e-8, grad_step_size = 0.01, grad_tol = 0.0001);
+
+        * max_iter: maximum number of iterations
+        * step_size: size of the first trial step.
+        * tol: accuracy of the line minimisation.
+        * grad_step_size: step size for the numerical computation of the gradient.
+        * grad_tol: tolerance when testing the norm of the gradient as stopping criterion.
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(grad_tol)
+        arg_list.append(grad_step_size)
+        arg_list.append(tol)
+        arg_list.append(step_size)
+        self._orig_init(*arg_list)
+    gsl_pr._orig_init = gsl_pr.__init__
+    gsl_pr.__init__ = _gsl_pr_ctor
+
+    def _gsl_nm_ctor(self, max_iter=100, step_size=1e-8, tol=1e-8):
+        """
+        Constructs a Nelder-Mead Algorithm (GSL)
+
+        USAGE: algorithm.gsl_nm(max_iter = 100, step_size = 1e-8, tol = 1e-8);
+
+        * max_iter: maximum number of iterations
+        * step_size: size of the first trial step.
+        * tol: accuracy of the line minimisation.
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(tol)
+        arg_list.append(step_size)
+        self._orig_init(*arg_list)
+    gsl_nm._orig_init = gsl_nm.__init__
+    gsl_nm.__init__ = _gsl_nm_ctor
+
+    def _gsl_nm2_ctor(self, max_iter=100, step_size=1e-8, tol=1e-8):
+        """
+        Constructs a Nelder-Mead algorithm (Variant2) (GSL)
+
+        USAGE: algorithm.gsl_nm2(max_iter = 100, step_size = 1e-8, tol = 1e-8)
+
+        * max_iter: maximum number of iterations
+        * step_size: size of the first trial step.
+        * tol: accuracy of the line minimisation.
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(tol)
+        arg_list.append(step_size)
+        self._orig_init(*arg_list)
+    gsl_nm2._orig_init = gsl_nm2.__init__
+    gsl_nm2.__init__ = _gsl_nm2_ctor
+
+    def _gsl_nm2rand_ctor(self, max_iter=100, step_size=1e-8, tol=1e-8):
+        """
+        Constructs a Nelder-Mead algorithm (Variant2 + randomly oriented initial simplex) (GSL)
+
+        USAGE: algorithm.gsl_nm2rand(max_iter = 100, step_size = 1e-8, tol = 1e-8);
+
+        * max_iter: maximum number of iterations
+        * step_size: size of the first trial step.
+        * tol: accuracy of the line minimisation.
+        """
+        # We set the defaults or the kwargs
+        arg_list = []
+        arg_list.append(max_iter)
+        arg_list.append(tol)
+        arg_list.append(step_size)
+        self._orig_init(*arg_list)
+    gsl_nm2rand._orig_init = gsl_nm2rand.__init__
+    gsl_nm2rand.__init__ = _gsl_nm2rand_ctor
+
+# IPOPT algorithm (only if PyGMO has been compiled with the ipopt option
+# activated)
 if "ipopt" in str(_get_algorithm_list()):
-    def _ipopt_ctor(self, max_iter = 100, constr_viol_tol = 1e-08, dual_inf_tol = 1e-08, compl_inf_tol = 1e-08,
-        nlp_scaling_method = True, obj_scaling_factor = 1.0, mu_init = 0.1, screen_output = False):
-        """
-        Constructs an Interior Point OPTimization Algorithm (IPOPT)
-
-        USAGE: algorithm.ipopt(major_iter = 100, constr_viol_tol = 1e-08, dual_inf_tol = 1e-08, compl_inf_tol = 1e-08, screen_output = False);
-
-        * max_iter: Maximum number of major iterations
-        * constr_viol_tol: Constraint violation tolerance
-        * dual_inf_tol: Dual infeasibility tolerance
-        * compl_inf_tol: Complementary feasibility tolerance
-        * nlp_scaling_method Select if the "gradient-based" scaling of the NLP should be used
-        * obj_scaling_factor Scaling factor for the objective function.
-        * mu_init Initial value for the barrier parameter.
-        * screen_output: Activates output on screen
-        """
-        # We set the defaults or the kwargs
-        arg_list=[]
-        arg_list.append(max_iter)
-        arg_list.append(constr_viol_tol)
-        arg_list.append(dual_inf_tol)
-        arg_list.append(compl_inf_tol)
-        arg_list.append(nlp_scaling_method)
-        arg_list.append(obj_scaling_factor)
-        arg_list.append(mu_init)
-        self._orig_init(*arg_list)
-        self.screen_output = screen_output
-    ipopt._orig_init = ipopt.__init__
-    ipopt.__init__ = _ipopt_ctor
-
-#SNOPT algorithm (only if PyGMO has been compiled with the snopt option activated)
+    def _ipopt_ctor(
+            self,
+            max_iter=100,
+            constr_viol_tol=1e-08,
+            dual_inf_tol=1e-08,
+            compl_inf_tol=1e-08,
+            nlp_scaling_method=True,
+            obj_scaling_factor=1.0,
+            mu_init=0.1,
+            screen_output=False):
+        """
+        Constructs an Interior Point OPTimization Algorithm (IPOPT)
+
+        USAGE: algorithm.ipopt(max_iter = 100, constr_viol_tol = 1e-08, dual_inf_tol = 1e-08, compl_inf_tol = 1e-08, screen_output = False);
+
+        * max_iter: Maximum number of major iterations
+        * constr_viol_tol: Constraint violation tolerance
+        * dual_inf_tol: Dual infeasibility tolerance
+        * compl_inf_tol: Complementary feasibility tolerance
+        * nlp_scaling_method: Select if the "gradient-based" scaling of the NLP should be used
+        * obj_scaling_factor: Scaling factor for the objective function.
+        * mu_init: Initial value for the barrier parameter.
+ * screen_output: Activates output on screen + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(max_iter) + arg_list.append(constr_viol_tol) + arg_list.append(dual_inf_tol) + arg_list.append(compl_inf_tol) + arg_list.append(nlp_scaling_method) + arg_list.append(obj_scaling_factor) + arg_list.append(mu_init) + self._orig_init(*arg_list) + self.screen_output = screen_output + ipopt._orig_init = ipopt.__init__ + ipopt.__init__ = _ipopt_ctor + +# SNOPT algorithm (only if PyGMO has been compiled with the snopt option +# activated) if "snopt" in str(_get_algorithm_list()): - def _snopt_ctor(self,major_iter = 100, feas_tol = 1e-6, opt_tol = 1e-6, screen_output = False): - """ - Constructs SNOPT Algorithm - - USAGE: algorithm.snopt(major_iter = 100, feas_tol = 1e-6, opt_tol = 1e-6, screen_output = False); - - * major_iter: Maximum number of major iterations - * feas_tol: Feasibility tolerance - * opt_tol: Optimality tolerance - * screen_output: Activates output on screen - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(major_iter) - arg_list.append(feas_tol) - arg_list.append(opt_tol) - self._orig_init(*arg_list) - self.screen_output = screen_output - snopt._orig_init = snopt.__init__ - snopt.__init__ = _snopt_ctor - + def _snopt_ctor( + self, + major_iter=100, + feas_tol=1e-6, + opt_tol=1e-6, + screen_output=False): + """ + Constructs SNOPT Algorithm + + USAGE: algorithm.snopt(major_iter = 100, feas_tol = 1e-6, opt_tol = 1e-6, screen_output = False); + + * major_iter: Maximum number of major iterations + * feas_tol: Feasibility tolerance + * opt_tol: Optimality tolerance + * screen_output: Activates output on screen + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(major_iter) + arg_list.append(feas_tol) + arg_list.append(opt_tol) + self._orig_init(*arg_list) + self.screen_output = screen_output + snopt._orig_init = snopt.__init__ + snopt.__init__ = _snopt_ctor diff --git a/PyGMO/algorithm/_base.py b/PyGMO/algorithm/_base.py index 39d1f1fb..97599a84 100644 --- a/PyGMO/algorithm/_base.py +++ b/PyGMO/algorithm/_base.py @@ -1,14 +1,19 @@ # -*- coding: utf-8 -*- -from _algorithm import _base +from PyGMO.algorithm._algorithm import _base + class base(_base): - """ - All Algorithms written in Python derive from this class - """ - def __init__(self): - _base.__init__(self) - def get_name(self): - return str(type(self)) - def __get_deepcopy__(self): - from copy import deepcopy - return deepcopy(self) + + """ + All Algorithms written in Python derive from this class + """ + + def __init__(self): + _base.__init__(self) + + def get_name(self): + return str(type(self)) + + def __get_deepcopy__(self): + from copy import deepcopy + return deepcopy(self) diff --git a/PyGMO/algorithm/_cmaes.py b/PyGMO/algorithm/_cmaes.py index 60cf1489..9221cb70 100644 --- a/PyGMO/algorithm/_cmaes.py +++ b/PyGMO/algorithm/_cmaes.py @@ -1,269 +1,338 @@ -from _base import base +from PyGMO.algorithm._base import base + class py_cmaes(base): - """ - Covariance Matrix Adaptation Evolutionary Strategy (Python) - """ - def __init__(self, gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False): - """ - Constructs a Covariance Matrix Adaptation Evolutionary Strategy (Python) - - USAGE: algorithm.py_cmaes(gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False) - - NOTE: In our variant of the algorithm, particle memory is 
used to extract the elite and reinsertion - is made aggressively ..... getting rid of the worst guy). Also, the bounds of the problem - are enforced, as to allow PaGMO machinery to work. Fine control on each iteration can be achieved - by calling the algo with gen=1 (algo state is stored, cmaes will continue at next call ... without - initializing again all its state!!) - - * gen: number of generations - * cc: time constant for C cumulation (in [0,1]) if -1 automatic values are set - * cs: time constant for sigma cumulation (in [0,1]) if -1 automatic values are set - * c1: learning rate for rank-1 update (in [0,1]) if -1 automatic values are set - * cmu: learning rate for rank-mu update (in [0,1]) if -1 automatic values are set - * sigma0: starting step (std) - * xtol: stopping criteria on the x tolerance - * ftol: stopping criteria on the f tolerance - * memory: when True the algorithm preserves memory of covariance, step and more between successive runs - * screen_output: activates screen_output (output at each generation) - """ - try: - import numpy as np - except ImportError: - raise ImportError("This algorithm needs numpy to run. Is numpy installed?") - - if ( gen <= 0 ): - raise ValueError("gen needs to be > 0") - - if ( (cc<0 or cc>1) and not cc==-1): - raise ValueError("cc needs to be in [0,1] or -1 for auto value") - - if ( (cs<0 or cs>1) and not cc==-1): - raise ValueError("cs needs to be in [0,1] or -1 for auto value") - - if ( (c1<0 or c1>1) and not cc==-1): - raise ValueError("c1 needs to be in [0,1] or -1 for auto value") - - if ( (cmu<0 or cmu>1) and not cc==-1): - raise ValueError("cmu needs to be in [0,1] or -1 for auto value") - - base.__init__(self) - - #Data members - self.__cc = cc - self.__cs = cs - self.__c1 = c1 - self.__cmu = cmu - self.__gen = gen - self.__xtol = xtol - self.__ftol = ftol - self.__sigma0 = sigma0 - self.__memory = memory - self.screen_output = screen_output - - #Algorithm memory - self.__mean = 0 - self.__variation = 0 - self.__newpop = np.matrix([[1]]) - self.__B = 0 - self.__D = 0 - self.__C = 0 - self.__invsqrtC = 0 - self.__pc = 0 - self.__ps = 0 - self.__counteval = 0 - self.__eigeneval = 0 - - np.random.seed() - - def evolve(self,pop): - from numpy import matrix,array,log, diag, eye,sqrt, exp, ones - from numpy.random import normal, random - from numpy.linalg import norm, eig - - # Let's rename some variables - prob = pop.problem - lb = prob.lb - ub = prob.ub - dim, cont_dim, int_dim, c_dim, f_dim = prob.dimension, prob.dimension - prob.i_dimension, prob.i_dimension, prob.c_dimension, prob.f_dimension - - # And perform checks on the problem type - if cont_dim == 0: - raise ValueError("There is no continuous dimension for CMAES to optimise!!") - - if c_dim > 0: - raise ValueError("This version of CMAES is not suitable for constrained optimisation") - - if int_dim > 0: - raise ValueError("The chromosome has an integer part .... this version of CMAES is not able to deal with it") - if f_dim > 1: - raise ValueError("The problem is not single objective and CMAES is not suitable to solve it") - - if len(pop) < 5: - raise ValueError("for CMAES at least 5 individuals in the population are required") - - # Setting sizes ..... 
- N = dim - lam = len(pop) - mu = lam/2 - - # Setting coefficients for Selection - weights = [log(mu+0.5) - log(i+1) for i in range(mu)] - sumW = sum(weights) - weights = [w/sumW for w in weights]; # weights for weighted recombination - mueff = 1.0 / sum(w**2 for w in weights) # variance-effectiveness of sum w_i x_i - - # Setting coefficients for Adaptation automatically or to user defined data - cc = self.__cc; cs = self.__cs; c1 = self.__c1; cmu = self.__cmu; - if self.__cc == -1: - cc = (4 + mueff/N) / (N+4 + 2*mueff/N); # t-const for cumulation for C - if self.__cs == -1: - cs = (mueff+2) / (N+mueff+5); # t-const for cumulation for sigma control - if self.__c1 == -1: - c1 = 2 / ((N+1.3)**2+mueff); # learning rate for rank-one update of C - if self.__cmu == -1: - cmu = 2 * (mueff-2+1/mueff) / ((N+2)**2+mueff); # and for rank-mu update - - damps = 1 + 2*max(0, sqrt((mueff-1)/(N+1))-1) + cs; #damping for sigma - chiN = N**0.5*(1-1.0/(4*N)+1.0/(21*N**2)) #expectation of ||N(0,I)|| == norm(randn(N,1)) - - # Initializing and allocating - if ( self.__newpop.shape==(N,lam) ) and (self.__memory): - mean = self.__mean - variation = self.__variation - newpop = self.__newpop - B = self.__B - D = self.__D - C = self.__C - invsqrtC = self.__invsqrtC - pc = self.__pc - ps = self.__ps - counteval = self.__counteval - eigeneval = self.__eigeneval - else: - mean = matrix(pop.champion.x).T - variation = array([[0.0]*N]*lam) - newpop = matrix([[0.0]*lam]*N) - B = matrix(eye(N,N)); #B defines the coordinate system - D = ones(N); #diagonal D defines the scaling - C = matrix(eye(N,N)); #covariance matrix C - invsqrtC = matrix(eye(N,N)); #inverse of sqrt(C) - pc =matrix([[0]]*N) - ps = matrix([[0]]*N) - counteval = 0 - eigeneval = 0 - - sigma=self.__sigma0 - - if self.screen_output: - print "CMAES 4 PaGMO (Python)\n" - print "mu: " + str(mu) + " - lambda: " + str(lam) + " - N: " + str(N) + " - muef: " + str(mueff) + "\n" - print "cc: " + str(cc ) + " - cs: " + str(cs) + " - c1: " + str(c1) + " - cmu: " + str(cmu) + " - sigma: " + str(sigma) + " - damps: " + str(damps) + " - chiN: " + str(chiN) + "\n" - - # Let's start the algorithm - for gen in range(self.__gen): - - #1 - We generate and evaluate lam new individuals - variation = [B * diag(D) * normal(0,1,[dim,1]) for i in range(lam)] - variation = [[j[0,0] for j in matr] for matr in variation] - for i,d_mu in enumerate(variation): - newpop[:,i] = mean + sigma * matrix(d_mu).T - - #fixing the bounds - for row in range(newpop.shape[0]): - for col in range(newpop.shape[1]): - if newpop[row,col] > ub[row]: - newpop[row,col] = lb[row] + random()*(ub[row]-lb[row]) - elif newpop[row,col] < lb[row]: - newpop[row,col] = lb[row] + random()*(ub[row]-lb[row]) - - #insert in population - for i in range(lam): - idx = pop.get_worst_idx() - pop.set_x(idx,[newpop[j,i] for j in range(N)]) - counteval += lam - - #2 - We extract the elite from this generation - #a = sorted(pop,lambda x,y: cmp(x.cur_f,y.cur_f)) - elite = [matrix(pop[idx].best_x).T for idx in pop.get_best_idx(mu)] - #elite = [matrix(ind.cur_x).T for ind in a] - #elite = elite[:mu] - - #3 - Compute the new elite mean storing the old one - meanold=mean - mean = elite[0]*weights[0] - for i in range(1,mu): - mean += elite[i]*weights[i] - - #4 - Update evolution paths - ps = (1 - cs)*ps + sqrt(cs*(2-cs)*mueff)* invsqrtC * (mean-meanold) / sigma - hsig = ((ps.T*ps)[0,0] / (1-(1-cs)**(2.0*counteval/lam)) / N) < (2.0 + 4.0/(N+1)); - hsig = int(hsig) - pc = (1-cc) * pc + hsig * sqrt(cc*(2-cc)*mueff) * (mean-meanold) / 
sigma; - - #5 - Adapt Covariance Matrix - Cold = C - C = (elite[0]-meanold)*(elite[0]-meanold).T*weights[0] - for i in range(1,mu): - C += (elite[i]-meanold)*(elite[i]-meanold).T*weights[i] - C /= sigma**2 - C = (1-c1-cmu)*Cold + cmu*C + c1 * ((pc * pc.T) + (1-hsig) * cc*(2-cc) * Cold) - - #6 - Adapt sigma - sigma *= exp( (cs/damps)*(norm(ps)/chiN - 1)); - - #7 - Perform eigen-decomposition of C - if ( (counteval - eigeneval) > (lam/(c1+cmu)/N/10) ): #achieve O(N^2) - eigeneval = counteval; - C = (C+C.T)/2 #enforce symmetry - D,B = eig(C); #eigen decomposition, B==normalized eigenvectors - D = [s**0.5 for s in D] #D contains standard deviations now - #if not (0 in D): #Avoids numerical nans skipping evaluation of invsqrtC - invsqrtC = B*diag([1/d for d in D])*B.T - - #8 - Print to screen if necessary - if self.screen_output: - if not(gen%20): - print "\nGen.\tChampion\tHighest\t\tLowest\t\tVariation\t\tStep" - print "%d\t%e\t%e\t%e\t%e\t%e" % (gen,pop.champion.f[0], - max([ind.cur_f[0] for ind in pop]),min([ind.cur_f[0] for ind in pop]), - norm(d_mu), sigma) - - #9 - Check the exit conditions (every 40 generations) - if not(gen%40): - if (norm(d_mu) < self.__xtol): - if self.screen_output: - print("Exit condition -- xtol < ") + str(self.__xtol) - return pop - - tmp = abs(pop[pop.get_worst_idx()].best_f[0] - pop[pop.get_best_idx()].best_f[0]) - - if (tmp < self.__ftol): - if self.screen_output: - print("Exit condition -- ftol < ") + str(self.__ftol) - return pop - - #Update algorithm memory - if self.__memory: - self.__mean = mean - self.__variation = variation - self.__newpop = newpop - self.__B = B - self.__D = D - self.__C = C - self.__invsqrtC = invsqrtC - self.__pc = pc - self.__ps = ps - self.__counteval = counteval - self.__eigeneval = eigeneval - self.__sigma0 = sigma - - if self.screen_output: - print("Exit condition -- iteration > ") + str(self.__gen) - return pop - - - def get_name(self): - return "CMAES (Python)" - def human_readable_extra(self): - return "gen=" + str(self.__gen) + " cc=" + str(self.__cc) + " cs=" + str(self.__cs) + " c1=" + str(self.__c1) + " cmu=" + str(self.__cmu) + " sigma0=" + str(self.__sigma0) + " xtol=" + str(self.__xtol) + " ftol=" + str(self.__ftol) + + """ + Covariance Matrix Adaptation Evolutionary Strategy (Python) + """ + + def __init__( + self, + gen=500, + cc=-1, + cs=-1, + c1=-1, + cmu=-1, + sigma0=0.5, + ftol=1e-6, + xtol=1e-6, + memory=False, + screen_output=False): + """ + Constructs a Covariance Matrix Adaptation Evolutionary Strategy (Python) + + USAGE: algorithm.py_cmaes(gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False) + + NOTE: In our variant of the algorithm, particle memory is used to extract the elite and reinsertion + is made aggressively ..... getting rid of the worst guy). Also, the bounds of the problem + are enforced, as to allow PaGMO machinery to work. Fine control on each iteration can be achieved + by calling the algo with gen=1 (algo state is stored, cmaes will continue at next call ... without + initializing again all its state!!) 
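+
+        Example (an illustrative sketch, not an official recipe; 'prob' stands
+        for any box-bounded, single-objective problem, and the population must
+        contain at least 5 individuals):
+
+            pop = population(prob, 20)
+            algo = algorithm.py_cmaes(gen=1, memory=True)
+            for i in range(100):
+                pop = algo.evolve(pop)  # one generation per call, state kept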
+ + * gen: number of generations + * cc: time constant for C cumulation (in [0,1]) if -1 automatic values are set + * cs: time constant for sigma cumulation (in [0,1]) if -1 automatic values are set + * c1: learning rate for rank-1 update (in [0,1]) if -1 automatic values are set + * cmu: learning rate for rank-mu update (in [0,1]) if -1 automatic values are set + * sigma0: starting step (std) + * xtol: stopping criteria on the x tolerance + * ftol: stopping criteria on the f tolerance + * memory: when True the algorithm preserves memory of covariance, step and more between successive runs + * screen_output: activates screen_output (output at each generation) + """ + try: + import numpy as np + except ImportError: + raise ImportError( + "This algorithm needs numpy to run. Is numpy installed?") + + if (gen <= 0): + raise ValueError("gen needs to be > 0") + + if ((cc < 0 or cc > 1) and not cc == -1): + raise ValueError("cc needs to be in [0,1] or -1 for auto value") + + if ((cs < 0 or cs > 1) and not cc == -1): + raise ValueError("cs needs to be in [0,1] or -1 for auto value") + + if ((c1 < 0 or c1 > 1) and not cc == -1): + raise ValueError("c1 needs to be in [0,1] or -1 for auto value") + + if ((cmu < 0 or cmu > 1) and not cc == -1): + raise ValueError("cmu needs to be in [0,1] or -1 for auto value") + + base.__init__(self) + + # Data members + self.__cc = cc + self.__cs = cs + self.__c1 = c1 + self.__cmu = cmu + self.__gen = gen + self.__xtol = xtol + self.__ftol = ftol + self.__sigma0 = sigma0 + self.__memory = memory + self.screen_output = screen_output + + # Algorithm memory + self.__mean = 0 + self.__variation = 0 + self.__newpop = np.matrix([[1]]) + self.__B = 0 + self.__D = 0 + self.__C = 0 + self.__invsqrtC = 0 + self.__pc = 0 + self.__ps = 0 + self.__counteval = 0 + self.__eigeneval = 0 + + np.random.seed() + + def evolve(self, pop): + from numpy import matrix, array, log, diag, eye, sqrt, exp, ones + from numpy.random import normal, random + from numpy.linalg import norm, eig + + # Let's rename some variables + prob = pop.problem + lb = prob.lb + ub = prob.ub + dim, cont_dim, int_dim, c_dim, f_dim = prob.dimension, prob.dimension - \ + prob.i_dimension, prob.i_dimension, prob.c_dimension, prob.f_dimension + + # And perform checks on the problem type + if cont_dim == 0: + raise ValueError( + "There is no continuous dimension for CMAES to optimise!!") + + if c_dim > 0: + raise ValueError( + "This version of CMAES is not suitable for constrained optimisation") + + if int_dim > 0: + raise ValueError( + "The chromosome has an integer part .... this version of CMAES is not able to deal with it") + if f_dim > 1: + raise ValueError( + "The problem is not single objective and CMAES is not suitable to solve it") + + if len(pop) < 5: + raise ValueError( + "for CMAES at least 5 individuals in the population are required") + + # Setting sizes ..... 
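+        # N is the problem dimension, lam the number of offspring sampled
+        # each generation (lambda = population size) and mu the number of
+        # parents used in the weighted recombination below, i.e. the
+        # (mu/mu_w, lambda) selection scheme of CMA-ES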
+ N = dim + lam = len(pop) + mu = lam / 2 + + # Setting coefficients for Selection + weights = [log(mu + 0.5) - log(i + 1) for i in range(mu)] + sumW = sum(weights) + weights = [w / sumW for w in weights] + # weights for weighted recombination + # variance-effectiveness of sum w_i x_i + mueff = 1.0 / sum(w ** 2 for w in weights) + + # Setting coefficients for Adaptation automatically or to user defined + # data + cc = self.__cc + cs = self.__cs + c1 = self.__c1 + cmu = self.__cmu + if self.__cc == -1: + cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N) + # t-const for cumulation for C + if self.__cs == -1: + cs = (mueff + 2) / (N + mueff + 5) + # t-const for cumulation for sigma control + if self.__c1 == -1: + c1 = 2 / ((N + 1.3) ** 2 + mueff) + # learning rate for rank-one update of C + if self.__cmu == -1: + cmu = 2 * (mueff - 2 + 1 / mueff) / ((N + 2) ** 2 + mueff) + # and for rank-mu update + + damps = 1 + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) + cs + # damping for sigma + # expectation of ||N(0,I)|| == norm(randn(N,1)) + chiN = N ** 0.5 * (1 - 1.0 / (4 * N) + 1.0 / (21 * N ** 2)) + + # Initializing and allocating + if (self.__newpop.shape == (N, lam)) and (self.__memory): + mean = self.__mean + variation = self.__variation + newpop = self.__newpop + B = self.__B + D = self.__D + C = self.__C + invsqrtC = self.__invsqrtC + pc = self.__pc + ps = self.__ps + counteval = self.__counteval + eigeneval = self.__eigeneval + else: + mean = matrix(pop.champion.x).T + variation = array([[0.0] * N] * lam) + newpop = matrix([[0.0] * lam] * N) + B = matrix(eye(N, N)) + # B defines the coordinate system + D = ones(N) + # diagonal D defines the scaling + C = matrix(eye(N, N)) + # covariance matrix C + invsqrtC = matrix(eye(N, N)) + # inverse of sqrt(C) + pc = matrix([[0]] * N) + ps = matrix([[0]] * N) + counteval = 0 + eigeneval = 0 + + sigma = self.__sigma0 + + if self.screen_output: + print("CMAES 4 PaGMO (Python)\n") + print("mu: " + str(mu) + " - lambda: " + str(lam) + + " - N: " + str(N) + " - muef: " + str(mueff) + "\n") + print( + "cc: " + + str(cc) + + " - cs: " + + str(cs) + + " - c1: " + + str(c1) + + " - cmu: " + + str(cmu) + + " - sigma: " + + str(sigma) + + " - damps: " + + str(damps) + + " - chiN: " + + str(chiN) + + "\n") + + # Let's start the algorithm + for gen in range(self.__gen): + + # 1 - We generate and evaluate lam new individuals + variation = [B * diag(D) * normal(0, 1, [dim, 1]) + for i in range(lam)] + variation = [[j[0, 0] for j in matr] for matr in variation] + for i, d_mu in enumerate(variation): + newpop[:, i] = mean + sigma * matrix(d_mu).T + + # fixing the bounds + for row in range(newpop.shape[0]): + for col in range(newpop.shape[1]): + if newpop[row, col] > ub[row]: + newpop[row, col] = lb[row] + \ + random() * (ub[row] - lb[row]) + elif newpop[row, col] < lb[row]: + newpop[row, col] = lb[row] + \ + random() * (ub[row] - lb[row]) + + #insert in population + for i in range(lam): + idx = pop.get_worst_idx() + pop.set_x(idx, [newpop[j, i] for j in range(N)]) + counteval += lam + + # 2 - We extract the elite from this generation + #a = sorted(pop,lambda x,y: cmp(x.cur_f,y.cur_f)) + elite = [matrix(pop[idx].best_x).T for idx in pop.get_best_idx(mu)] + #elite = [matrix(ind.cur_x).T for ind in a] + #elite = elite[:mu] + + # 3 - Compute the new elite mean storing the old one + meanold = mean + mean = elite[0] * weights[0] + for i in range(1, mu): + mean += elite[i] * weights[i] + + # 4 - Update evolution paths + ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * \ + 
invsqrtC * (mean - meanold) / sigma + hsig = ((ps.T * ps)[0, 0] / + (1 - (1 - cs) ** (2.0 * counteval / lam)) / N) < (2.0 + 4.0 / + (N + 1)) + hsig = int(hsig) + pc = (1 - cc) * pc + hsig * sqrt(cc * (2 - cc) * mueff) * \ + (mean - meanold) / sigma + + # 5 - Adapt Covariance Matrix + Cold = C + C = (elite[0] - meanold) * (elite[0] - meanold).T * weights[0] + for i in range(1, mu): + C += (elite[i] - meanold) * (elite[i] - meanold).T * weights[i] + C /= sigma ** 2 + C = (1 - c1 - cmu) * Cold + cmu * C + c1 * \ + ((pc * pc.T) + (1 - hsig) * cc * (2 - cc) * Cold) + + # 6 - Adapt sigma + sigma *= exp((cs / damps) * (norm(ps) / chiN - 1)) + + # 7 - Perform eigen-decomposition of C + # achieve O(N^2) + if ((counteval - eigeneval) > (lam / (c1 + cmu) / N / 10)): + eigeneval = counteval + C = (C + C.T) / 2 # enforce symmetry + D, B = eig(C) + # eigen decomposition, B==normalized eigenvectors + D = [s ** 0.5 for s in D] # D contains standard deviations now + # if not (0 in D): #Avoids numerical + # nans skipping evaluation of invsqrtC + invsqrtC = B * diag([1 / d for d in D]) * B.T + + # 8 - Print to screen if necessary + if self.screen_output: + if not(gen % 20): + print( + "\nGen.\tChampion\tHighest\t\tLowest\t\tVariation\t\tStep") + print( + "%d\t%e\t%e\t%e\t%e\t%e" % + (gen, pop.champion.f[0], max( + [ind.cur_f[0] for ind in pop]), min( + [ind.cur_f[0] for ind in pop]), norm(d_mu), sigma)) + + # 9 - Check the exit conditions (every 40 generations) + if not(gen % 40): + if (norm(d_mu) < self.__xtol): + if self.screen_output: + print("Exit condition -- xtol < " + str(self.__xtol)) + return pop + + tmp = abs(pop[pop.get_worst_idx()].best_f[0] - + pop[pop.get_best_idx()].best_f[0]) + + if (tmp < self.__ftol): + if self.screen_output: + print("Exit condition -- ftol < " + str(self.__ftol)) + return pop + + # Update algorithm memory + if self.__memory: + self.__mean = mean + self.__variation = variation + self.__newpop = newpop + self.__B = B + self.__D = D + self.__C = C + self.__invsqrtC = invsqrtC + self.__pc = pc + self.__ps = ps + self.__counteval = counteval + self.__eigeneval = eigeneval + self.__sigma0 = sigma + + if self.screen_output: + print("Exit condition -- iteration > " + str(self.__gen)) + return pop + + def get_name(self): + return "CMAES (Python)" + + def human_readable_extra(self): + return "gen=" + str(self.__gen) + " cc=" + str(self.__cc) + " cs=" + str(self.__cs) + " c1=" + str(self.__c1) + \ + " cmu=" + str(self.__cmu) + " sigma0=" + str(self.__sigma0) + " xtol=" + str(self.__xtol) + " ftol=" + str(self.__ftol) diff --git a/PyGMO/algorithm/_cross_entropy.py b/PyGMO/algorithm/_cross_entropy.py index 3eca41a9..ea478f1b 100644 --- a/PyGMO/algorithm/_cross_entropy.py +++ b/PyGMO/algorithm/_cross_entropy.py @@ -1,155 +1,184 @@ -from _base import base +from PyGMO.algorithm._base import base + class py_cross_entropy(base): - """ - Cross-Entropy algorithm (Python) - """ - def __init__(self, gen = 500, elite = 0.5, scale = 0.3, variant=1, screen_output = False): - """ - Constructs a Cross-Entropy Algorithm (Python) - - USAGE: algorithm.py_cross_entropy(gen = 1, elite = 0.5, scale = 0.2, variant=1, screen_output = False)) - - NOTE: A multivariate normal distribution is used. - The first sample is centered around the population champion. 
- Covariance matrix and mean is evaluated using ind.best_x - - * gen: number of generations - * elite: fraction of the population considered as elite (in (0,1]) - * scale: scaling factor for the estimated covariance matrix - * variant: algoritmic variant to use (one of [1,2]) - 1. 'Canonical' - Covariance Matrix is evaluated as sum (x_(i+1)-mu_i)^T (x_(i+1)-mu_i) - 2. 'Dario's' - Covariance Matrix is evaluated as sum (x_(i+1)-mu_i^T)^T (x_(i+1)-mu_i^T) - * screen_output: activates screen_output (output at each generation) - """ - try: - import numpy as np - except ImportError: - raise ImportError("This algorithm needs numpy to run. Is numpy installed?") - - base.__init__(self) - self.__gen = gen - self.__elite = elite - self.__scale = scale - self.__screen_output = screen_output - self.__weights = [] - self.__variant = variant - np.random.seed() - - def evolve(self,pop): - from numpy import matrix,array,log, diag - from numpy.random import multivariate_normal,random,normal - from numpy.linalg import norm, cholesky, LinAlgError, eig - import matplotlib.pyplot as pl - - # Let's rename some variables - prob = pop.problem - lb = prob.lb - ub = prob.ub - dim, cont_dim, int_dim, c_dim = prob.dimension, prob.dimension - prob.i_dimension, prob.i_dimension, prob.c_dimension - - # And perform checks on the problem type - if cont_dim == 0: - raise ValueError("There is no continuous dimension for cross_entropy to optimise!!") - - if c_dim > 0: - raise ValueError("This version of cross_entropy is not suitable for constrained optimisation") - - if int_dim > 0: - raise ValueError("The chromosome has an integer part .... this version of cross_entropy is not able to deal with it") - - # We then check that the elite is not empty - n_elite = int(len(pop) * self.__elite) - if n_elite == 0: - raise ValueError("Elite contains no individuals ..... maybe increase the elite parameter?") - - # If the incoming population is empty ... 
do nothing - np = len(pop) - if np == 0: - return population - - # Let's start the algorithm - mu = matrix(pop.champion.x) - C = matrix([[0]*n_elite]*n_elite) - variation = array([[0.0]*dim]*np) - newpop = array([[0.0]*dim]*np) - - self.__weights = [log(n_elite+0.5) - log(i+1) for i in range(n_elite)] # recombination weights - self.__weights = [w / sum(self.__weights) for w in self.__weights] # normalize recombination weights array - - for gen in range(self.__gen): - - #1 - We extract the elite from this generation (NOTE: we use best_f to rank) - elite = [matrix(pop[idx].best_x) for idx in pop.get_best_idx(n_elite)] - - pl.plot(0,0.1,'og') - for ind in elite: - pl.plot(ind[0,0],ind[0,1],'or') - pl.show() - raw_input() - - - #2 - We evaluate the Covariance Matrix - if self.__variant==1: - # as least square estimator of the elite (with mean mu) - C = (elite[0]-mu).T*(elite[0]-mu)*self.__weights[0] - for i in range(1,n_elite): - C = C + (elite[i]-mu).T*(elite[i]-mu)*self.__weights[i] - 1/0 - if self.__variant==2: - # using Dario's method - mu = mu.T - C = (elite[0]-mu).T*(elite[0]-mu)*self.__weights[0] - for i in range(1,n_elite): - C = C + (elite[i]-mu).T*(elite[i]-mu)*self.__weights[i] - #C = C / n_elite - - #3 - We compute the new elite mean - mu = elite[0]*self.__weights[0] - for i in range(1,n_elite): - mu = mu + elite[i]*self.__weights[i] - pl.plot(mu[0,0],mu[0,1],'ob') - raw_input() - - #4 - We generate the new sample - variation = multivariate_normal([0]*dim,C,[np]) - D, B = eig(C) # eigen decomposition, B==normalized eigenvectors, O(N**3) - D = [d**0.5 for d in D] # D contains standard deviations now - variation = [B * diag(D) * normal(0,1,[dim,1]) for i in range(np)] - variation = [[j[0,0] for j in matr] for matr in variation] - - #U = cholesky(C) - #for i in range(np): - # y = normal(0,1,[dim,1]) - # variation[i] = (U*y).T - for i,d_mu in enumerate(variation): - newpop[i] = mu + d_mu * self.__scale - pl.plot(newpop[i][0],newpop[i][1],'ok') - pl.show() - raw_input() - - #5 - We fix it within the bounds - for row in range(newpop.shape[0]): - for col in range(newpop.shape[1]): - if newpop[row,col] > ub[col]: - newpop[row,col] = lb[col] + random()*(ub[col]-lb[col]) - elif newpop[row,col] < lb[col]: - newpop[row,col] = lb[col] + random()*(ub[col]-lb[col]) - - #6 - And perform reinsertion - for i in range(np): - #idx = pop.get_worst_idx() - pop.set_x(i,newpop[i]) - - #7 - We print to screen if necessary - if self.__screen_output: - if not(gen%20): - print "\nGen.\tChampion\tHighest\t\tLowest\t\tVariation" - print "%d\t%e\t%e\t%e\t%e" % (gen,pop.champion.f[0],max([ind.cur_f[0] for ind in pop]),min([ind.cur_f[0] for ind in pop]), norm(d_mu)) - return pop - - - def get_name(self): - return "Cross Entropy (Python)" - def human_readable_extra(self): - return "gen=" + str(self.__gen) + " elite fraction=" + str(self.__elite) + " covariance scaling=" + str(self.__scale) + " variant=" + str(self.__variant) + + """ + Cross-Entropy algorithm (Python) + """ + + def __init__( + self, + gen=500, + elite=0.5, + scale=0.3, + variant=1, + screen_output=False): + """ + Constructs a Cross-Entropy Algorithm (Python) + + USAGE: algorithm.py_cross_entropy(gen = 1, elite = 0.5, scale = 0.2, variant=1, screen_output = False)) + + NOTE: A multivariate normal distribution is used. + The first sample is centered around the population champion. 
+        Covariance matrix and mean are estimated using ind.best_x
+
+        * gen: number of generations
+        * elite: fraction of the population considered as elite (in (0,1])
+        * scale: scaling factor for the estimated covariance matrix
+        * variant: algorithmic variant to use (one of [1,2])
+                1. 'Canonical' - Covariance Matrix is evaluated as sum (x_(i+1)-mu_i)^T (x_(i+1)-mu_i)
+                2. 'Dario's' - Covariance Matrix is evaluated as sum (x_(i+1)-mu_i^T)^T (x_(i+1)-mu_i^T)
+        * screen_output: activates screen_output (output at each generation)
+        """
+        try:
+            import numpy as np
+        except ImportError:
+            raise ImportError(
+                "This algorithm needs numpy to run. Is numpy installed?")
+
+        base.__init__(self)
+        self.__gen = gen
+        self.__elite = elite
+        self.__scale = scale
+        self.__screen_output = screen_output
+        self.__weights = []
+        self.__variant = variant
+        np.random.seed()
+
+    def evolve(self, pop):
+        from numpy import matrix, array, log, diag
+        from numpy.random import multivariate_normal, random, normal
+        from numpy.linalg import norm, cholesky, LinAlgError, eig
+        # matplotlib is only needed by the debug plots, which are commented
+        # out below
+        # import matplotlib.pyplot as pl
+
+        # Let's rename some variables
+        prob = pop.problem
+        lb = prob.lb
+        ub = prob.ub
+        dim, cont_dim, int_dim, c_dim = prob.dimension, prob.dimension - \
+            prob.i_dimension, prob.i_dimension, prob.c_dimension
+
+        # And perform checks on the problem type
+        if cont_dim == 0:
+            raise ValueError(
+                "There is no continuous dimension for cross_entropy to optimise!!")
+
+        if c_dim > 0:
+            raise ValueError(
+                "This version of cross_entropy is not suitable for constrained optimisation")
+
+        if int_dim > 0:
+            raise ValueError(
+                "The chromosome has an integer part .... this version of cross_entropy is not able to deal with it")
+
+        # We then check that the elite is not empty
+        n_elite = int(len(pop) * self.__elite)
+        if n_elite == 0:
+            raise ValueError(
+                "Elite contains no individuals ..... maybe increase the elite parameter?")
+
+        # If the incoming population is empty ... do nothing
+        np = len(pop)
+        if np == 0:
+            return pop
+
+        # Let's start the algorithm
+        mu = matrix(pop.champion.x)
+        C = matrix([[0] * n_elite] * n_elite)
+        variation = array([[0.0] * dim] * np)
+        newpop = array([[0.0] * dim] * np)
+
+        self.__weights = [log(n_elite + 0.5) - log(i + 1)
+                          for i in range(n_elite)]  # recombination weights
+        # normalize recombination weights array
+        self.__weights = [w / sum(self.__weights) for w in self.__weights]
+
+        for gen in range(self.__gen):
+
+            # 1 - We extract the elite from this generation (NOTE: we use
+            # best_f to rank)
+            elite = [matrix(pop[idx].best_x)
+                     for idx in pop.get_best_idx(n_elite)]
+
+            # Debug visualisation of the elite (disabled: it blocks the
+            # evolution waiting for keyboard input at every generation)
+            # pl.plot(0, 0.1, 'og')
+            # for ind in elite:
+            #     pl.plot(ind[0, 0], ind[0, 1], 'or')
+            # pl.show()
+            # input()
+
+            # 2 - We evaluate the Covariance Matrix
+            if self.__variant == 1:
+                # as least square estimator of the elite (with mean mu)
+                C = (elite[0] - mu).T * (elite[0] - mu) * self.__weights[0]
+                for i in range(1, n_elite):
+                    C = C + (elite[i] - mu).T * \
+                        (elite[i] - mu) * self.__weights[i]
+            if self.__variant == 2:
+                # using Dario's method
+                mu = mu.T
+                C = (elite[0] - mu).T * (elite[0] - mu) * self.__weights[0]
+                for i in range(1, n_elite):
+                    C = C + (elite[i] - mu).T * \
+                        (elite[i] - mu) * self.__weights[i]
+                #C = C / n_elite
+
+            # 3 - We compute the new elite mean
+            mu = elite[0] * self.__weights[0]
+            for i in range(1, n_elite):
+                mu = mu + elite[i] * self.__weights[i]
+            # pl.plot(mu[0, 0], mu[0, 1], 'ob')
+            # input()
+
+            # 4 - We generate the new sample
+            variation = multivariate_normal([0] * dim, C, [np])
+            # eigen decomposition, B==normalized eigenvectors, O(N**3)
+            D, B = eig(C)
+            D = [d ** 0.5 for d in D]  # D contains standard deviations now
+            variation = [B * diag(D) * normal(0, 1, [dim, 1])
+                         for i in range(np)]
+            variation = [[j[0, 0] for j in matr] for matr in variation]
+
+            #U = cholesky(C)
+            # for i in range(np):
+            #    y = normal(0,1,[dim,1])
+            #    variation[i] = (U*y).T
+            for i, d_mu in enumerate(variation):
+                # d_mu is a plain list, hence the array() conversion
+                newpop[i] = mu + array(d_mu) * self.__scale
+                # pl.plot(newpop[i][0], newpop[i][1], 'ok')
+            # pl.show()
+            # input()
+
+            # 5 - We fix it within the bounds
+            for row in range(newpop.shape[0]):
+                for col in range(newpop.shape[1]):
+                    if newpop[row, col] > ub[col]:
+                        newpop[row, col] = lb[col] + \
+                            random() * (ub[col] - lb[col])
+                    elif newpop[row, col] < lb[col]:
+                        newpop[row, col] = lb[col] + \
+                            random() * (ub[col] - lb[col])
+
+            # 6 - And perform reinsertion
+            for i in range(np):
+                #idx = pop.get_worst_idx()
+                pop.set_x(i, newpop[i])
+
+            # 7 - We print to screen if necessary
+            if self.__screen_output:
+                if not(gen % 20):
+                    print("\nGen.\tChampion\tHighest\t\tLowest\t\tVariation")
+                print(
+                    "%d\t%e\t%e\t%e\t%e" %
+                    (gen, pop.champion.f[0], max(
+                        [ind.cur_f[0] for ind in pop]), min(
+                        [ind.cur_f[0] for ind in pop]), norm(d_mu)))
+        return pop
+
+    def get_name(self):
+        return "Cross Entropy (Python)"
+
+    def human_readable_extra(self):
+        return "gen=" + str(self.__gen) + " elite fraction=" + str(self.__elite) + \
+            " covariance scaling=" + str(self.__scale) + " variant=" + str(self.__variant)
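+
+# A minimal usage sketch (an illustration, not part of the original patch):
+# it assumes a working PyGMO build where problem.schwefel, population and
+# algorithm.py_cross_entropy are importable, as the USAGE note above suggests.
+#
+#   from PyGMO import problem, population, algorithm
+#   prob = problem.schwefel(2)
+#   pop = population(prob, 40)
+#   algo = algorithm.py_cross_entropy(gen=100, elite=0.5, scale=0.3, variant=2)
+#   pop = algo.evolve(pop)
+#   print(pop.champion.f)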
diff --git a/PyGMO/algorithm/_example.py b/PyGMO/algorithm/_example.py
index be072f9e..1372c9e5 100644
--- a/PyGMO/algorithm/_example.py
+++ b/PyGMO/algorithm/_example.py
@@ -1,58 +1,62 @@
-from _base import base
+from PyGMO.algorithm._base import base
+
 class py_example(base):
-    """
-    Monte-Carlo (random sampling) algorithm implemented purely in Python.
-    """
-
-    def __init__(self,iter = 10):
-        """
-        Constructs a Monte-Carlo (random sampling) algorithm
-
-        USAGE: algorithm.py_example(iter = 10)
-
-        NOTE: At the end of each iteration, the randomly generated
-                point substitutes the worst individual in the population if better
-
-        * iter: number of random samples
-        """
-        #We start calling the base constructor
-        super(py_example,self).__init__()
-        #We then define the algorithm 'private' data members
-        self.__iter = iter
-
-    #This is the 'juice' of the algorithm, the method where the actual optimzation is coded.
-    def evolve(self,pop):
-        #If the population is empty (i.e. no individuals) nothing happens
-        if len(pop) == 0:
-            return pop
-
-        #Here we rename some variables, in particular the problem
-        prob = pop.problem
-        #Its dimensions (total and continuous)
-        dim, cont_dim = prob.dimension, prob.dimension - prob.i_dimension
-        #And the lower/upper bounds for the chromosome
-        lb, ub = prob.lb, prob.ub
-        import random
-
-        #The algorithm now starts manipulating the population
-        for _ in range(self.__iter):
-            #We create a random vector within the bounds ... first the continuous part
-            tmp_cont = [random.uniform(lb[i],ub[i]) for i in range(cont_dim)]
-            #then the integer part
-            tmp_int = [float(random.randint(lb[i],ub[i])) for i in range(cont_dim,dim)]
-            #and we assemble them into one decision vector
-            tmp_x = tmp_cont + tmp_int
-            #which we push back in the population
-            pop.push_back(tmp_x)
-            #to then remove the worst individual
-            pop.erase(pop.get_worst_idx())
-        #at the end of it all we return the 'evolved' population
-        return pop
-
-    def get_name(self):
-        return "Monte Carlo (Python)"
-
-    def human_readable_extra(self):
-        return "iter=" + str(self.__iter)
+
+    """
+    Monte-Carlo (random sampling) algorithm implemented purely in Python.
+    """
+
+    def __init__(self, iter=10):
+        """
+        Constructs a Monte-Carlo (random sampling) algorithm
+
+        USAGE: algorithm.py_example(iter = 10)
+
+        NOTE: At the end of each iteration, the randomly generated
+        point substitutes the worst individual in the population if better
+
+        * iter: number of random samples
+        """
+        # We start calling the base constructor
+        super(py_example, self).__init__()
+        # We then define the algorithm 'private' data members
+        self.__iter = iter
+
+    # This is the 'juice' of the algorithm, the method where the actual
+    # optimization is coded.
+    def evolve(self, pop):
+        # If the population is empty (i.e. no individuals) nothing happens
+        if len(pop) == 0:
+            return pop
+
+        # Here we rename some variables, in particular the problem
+        prob = pop.problem
+        # Its dimensions (total and continuous)
+        dim, cont_dim = prob.dimension, prob.dimension - prob.i_dimension
+        # And the lower/upper bounds for the chromosome
+        lb, ub = prob.lb, prob.ub
+        import random
+
+        # The algorithm now starts manipulating the population
+        for _ in range(self.__iter):
+            # We create a random vector within the bounds ... first the
+            # continuous part
+            tmp_cont = [random.uniform(lb[i], ub[i]) for i in range(cont_dim)]
+            # then the integer part
+            tmp_int = [float(random.randint(lb[i], ub[i]))
+                       for i in range(cont_dim, dim)]
+            # and we assemble them into one decision vector
+            tmp_x = tmp_cont + tmp_int
+            # which we push back in the population
+            pop.push_back(tmp_x)
+            # to then remove the worst individual
+            pop.erase(pop.get_worst_idx())
+        # at the end of it all we return the 'evolved' population
+        return pop
+
+    def get_name(self):
+        return "Monte Carlo (Python)"
+
+    def human_readable_extra(self):
+        return "iter=" + str(self.__iter)
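+
+# A minimal usage sketch (an illustration, not part of the original patch),
+# assuming a working PyGMO build where problem.ackley, population and
+# algorithm.py_example are importable:
+#
+#   from PyGMO import problem, population, algorithm
+#   prob = problem.ackley(5)
+#   pop = population(prob, 20)
+#   algo = algorithm.py_example(iter=100)
+#   pop = algo.evolve(pop)
+#   print(pop.champion.f)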
diff --git a/PyGMO/algorithm/_scipy_algos.py b/PyGMO/algorithm/_scipy_algos.py
index 5306bbfc..b4caf638 100644
--- a/PyGMO/algorithm/_scipy_algos.py
+++ b/PyGMO/algorithm/_scipy_algos.py
@@ -1,326 +1,502 @@
-from _base import base
+from PyGMO.algorithm._base import base
 
 # Helper class to ease the inclusion of scipy.optimize solvers.
+
+
 class _scipy_base(base):
-    def __init__(self,solver_name,constrained):
-        base.__init__(self)
-        try:
-            exec('from scipy.optimize import %s as solver' % solver_name)
-            from numpy import concatenate, array
-        except ImportError:
-            raise ImportError('The necessary SciPy and/or NumPy classes/functions could not be imported')
-        self.solver = solver
-        self.constrained = constrained
-    # Check if problem is compatible with the algorithm.
-    def _problem_checks(self,prob):
-        if prob.f_dimension > 1:
-            raise ValueError("this algorithm does not support multi-objective optimisation")
-        if prob.dimension == prob.i_dimension:
-            raise ValueError("the provided problem has no continuous part")
-        if not self.constrained and prob.c_dimension:
-            raise ValueError("this algorithm does not support constrained optimisation")
-    # Check that the algorithm did not go out of bounds, and, in such a case, correct the chromosome.
-    def _check_new_chromosome(self,new_chromosome,prob):
-        for i in range(0,len(new_chromosome)):
-            if new_chromosome[i] < prob.lb[i]:
-                new_chromosome[i] = prob.lb[i]
-            if new_chromosome[i] > prob.ub[i]:
-                new_chromosome[i] = prob.ub[i]
-        return new_chromosome
-    def _starting_params(self,pop):
-        from numpy import array
-        # Number of equality constraints.
-        n_ec = pop.problem.c_dimension - pop.problem.ic_dimension
-        # Extract the continuous part of the first individual's current chromosome.
-        x0 = array(pop[pop.get_best_idx()].cur_x[0:pop.problem.dimension - pop.problem.i_dimension],dtype=float)
-        # Combinatorial part of the chromosome (which will not be optimised).
-        x0_comb = array(pop[pop.get_best_idx()].cur_x[pop.problem.dimension - pop.problem.i_dimension:],dtype=float)
-        return n_ec,x0,x0_comb
+
+    def __init__(self, solver_name, constrained):
+        base.__init__(self)
+        try:
+            # NOTE: exec() cannot bind new local names inside a function in
+            # Python 3, so the solver is looked up explicitly instead.
+            import scipy.optimize
+            solver = getattr(scipy.optimize, solver_name)
+            from numpy import concatenate, array
+        except (ImportError, AttributeError):
+            raise ImportError(
+                'The necessary SciPy and/or NumPy classes/functions could not be imported')
+        self.solver = solver
+        self.constrained = constrained
+    # Check if problem is compatible with the algorithm.
+ + def _problem_checks(self, prob): + if prob.f_dimension > 1: + raise ValueError( + "this algorithm does not support multi-objective optimisation") + if prob.dimension == prob.i_dimension: + raise ValueError("the provided problem has no continuous part") + if not self.constrained and prob.c_dimension: + raise ValueError( + "this algorithm does not support constrained optimisation") + # Check that the algorithm did not go out of bounds, and, in such a case, + # correct the chromosome. + + def _check_new_chromosome(self, new_chromosome, prob): + for i in range(0, len(new_chromosome)): + if new_chromosome[i] < prob.lb[i]: + new_chromosome[i] = prob.lb[i] + if new_chromosome[i] > prob.ub[i]: + new_chromosome[i] = prob.ub[i] + return new_chromosome + + def _starting_params(self, pop): + from numpy import array + # Number of equality constraints. + n_ec = pop.problem.c_dimension - pop.problem.ic_dimension + # Extract the continuous part of the first individual's current + # chromosome. + x0 = array( + pop[pop.get_best_idx()].cur_x + [0: pop.problem.dimension - pop.problem.i_dimension], dtype=float) + # Combinatorial part of the chromosome (which will not be optimised). + x0_comb = array( + pop[pop.get_best_idx()].cur_x + [pop.problem.dimension - pop.problem.i_dimension:], dtype=float) + return n_ec, x0, x0_comb + class scipy_fmin(_scipy_base): - """ - Wrapper around SciPy's fmin optimiser (Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables.) - """ - def __init__(self, maxiter=1, xtol=0.0001, ftol=0.0001, maxfun=None, full_output=0, disp=0, retall=0): - """ - Constructs a Nelder-Mead Simplex algorithm (SciPy) - - USAGE: algorithm.scipy_fmin(maxiter=1, xtol=0.0001, ftol=0.0001, maxfun=None, full_output=0, disp=0, retall=0) - - * maxiter: Maximum number of iterations to perform - * xtol: Relative error in xopt acceptable for convergence - * ftol: Relative error in func(xopt) acceptable for convergence - * maxfun: Maximum number of function evaluations to make - * full_output: Set to True if fval and warnflag outputs are desired - * disp: Set to True to print convergence messages - * retall: Set to True to return list of solutions at each iteration - """ - _scipy_base.__init__(self,'fmin',False) - self.xtol = xtol - self.ftol = ftol - self.maxiter = maxiter - self.maxfun = maxfun - self.full_output = full_output - self.disp = disp - self.retall = retall - - def evolve(self,pop): - from numpy import concatenate - prob = pop.problem - self._problem_checks(prob) - if len(pop) == 0: - return pop - _, x0, x0_comb = self._starting_params(pop) - retval = self.solver(lambda x: prob.objfun(concatenate((x,x0_comb)))[0],x0,xtol=self.xtol, ftol=self.ftol, maxiter=self.maxiter, maxfun=self.maxfun, full_output=self.full_output, disp=self.disp, retall=self.retall) - new_chromosome = list(retval) + list(x0_comb) - pop.set_x(pop.get_best_idx(),self._check_new_chromosome(new_chromosome,prob)) - return pop - def get_name(self): - return "Nelder-Mead Simplex (SciPy)" - def human_readable_extra(self): - return "maxiter = " + str(self.maxiter) + ", xtol = " + str(self.xtol) + ", ftol = " + str(self.ftol) + ", maxfun = " +str(self.maxfun) + + """ + Wrapper around SciPy's fmin optimiser (Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables.) 
+ """ + + def __init__( + self, + maxiter=1, + xtol=0.0001, + ftol=0.0001, + maxfun=None, + full_output=0, + disp=0, + retall=0): + """ + Constructs a Nelder-Mead Simplex algorithm (SciPy) + + USAGE: algorithm.scipy_fmin(maxiter=1, xtol=0.0001, ftol=0.0001, maxfun=None, full_output=0, disp=0, retall=0) + + * maxiter: Maximum number of iterations to perform + * xtol: Relative error in xopt acceptable for convergence + * ftol: Relative error in func(xopt) acceptable for convergence + * maxfun: Maximum number of function evaluations to make + * full_output: Set to True if fval and warnflag outputs are desired + * disp: Set to True to print convergence messages + * retall: Set to True to return list of solutions at each iteration + """ + _scipy_base.__init__(self, 'fmin', False) + self.xtol = xtol + self.ftol = ftol + self.maxiter = maxiter + self.maxfun = maxfun + self.full_output = full_output + self.disp = disp + self.retall = retall + + def evolve(self, pop): + from numpy import concatenate + prob = pop.problem + self._problem_checks(prob) + if len(pop) == 0: + return pop + _, x0, x0_comb = self._starting_params(pop) + retval = self.solver( + lambda x: prob.objfun( + concatenate( + (x, + x0_comb)))[0], + x0, + xtol=self.xtol, + ftol=self.ftol, + maxiter=self.maxiter, + maxfun=self.maxfun, + full_output=self.full_output, + disp=self.disp, + retall=self.retall) + new_chromosome = list(retval) + list(x0_comb) + pop.set_x( + pop.get_best_idx(), + self._check_new_chromosome( + new_chromosome, + prob)) + return pop + + def get_name(self): + return "Nelder-Mead Simplex (SciPy)" + + def human_readable_extra(self): + return "maxiter = " + str(self.maxiter) + ", xtol = " + str(self.xtol) + \ + ", ftol = " + str(self.ftol) + ", maxfun = " + str(self.maxfun) + class scipy_l_bfgs_b(_scipy_base): - """ - Wrapper around SciPy's fmin_l_bfgs_b optimiser (uses L-BFGS-B algorithm) - """ - def __init__(self, maxfun = 1, m = 10, factr = 10000000.0, pgtol = 1e-05, epsilon = 1e-08, screen_output = False): - """ - Constructs a L-BFGS-B algorithm (SciPy) - - NOTE: gradient is numerically approximated - - USAGE: algorithm.scipy_l_bfgs_b(maxfun = 15000, m = 10, factr = 10000000.0, pgtol = 1e-05, epsilon = 1e-08, screen_output = False): - - * maxfun: maximum number of function evaluations - * m: the maximum number of variable metric corrections - used to define the limited memory matrix. (the limited memory BFGS - method does not store the full hessian but uses this many terms in an - approximation to it). - * factr: The iteration stops when - (f{k} - f{k+1}) / max{\| f{k} \| , \| f{k+1} \|,1} <= factr*epsmch - where epsmch is the machine precision, which is automatically - generated by the code. Typical values for factr: 1e12 for - low accuracy; 1e7 for moderate accuracy; 10.0 for extremely - high accuracy. - * pgtol: The iteration will stop when - max{\| proj g{i} \| i = 1, ..., n} <= pgtol - where proj g{i} is the ith component of the projected gradient. 
- * epsilon: step size used when approx_grad is true, for numerically - calculating the gradient - * screen_output: Set to True to print iterations - """ - _scipy_base.__init__(self,'fmin_l_bfgs_b',False) - self.maxfun = maxfun - self.m = m - self.factr = factr - self.pgtol = pgtol - self.epsilon = epsilon - self.screen_output = screen_output - def evolve(self,pop): - from numpy import concatenate, array - prob = pop.problem - self._problem_checks(prob) - if len(pop) == 0: - return pop - _, x0, x0_comb = self._starting_params(pop) - # Extract the a list of tuples representing the bounds. - prob_bounds = [(prob.lb[i],prob.ub[i]) for i in range(0,prob.dimension - prob.i_dimension)] - if self.screen_output: - iprn = 1 - else: - iprn = -1 - retval = self.solver(lambda x: array(prob.objfun(concatenate((x,x0_comb))),dtype=float),x0,bounds = prob_bounds,approx_grad = True, iprint = iprn, pgtol = self.pgtol, maxfun = self.maxfun, factr = self.factr, m = self.m, epsilon=self.epsilon) - new_chromosome = list(retval[0]) + list(x0_comb) - pop.set_x(pop.get_best_idx(),self._check_new_chromosome(new_chromosome,prob)) - return pop - def get_name(self): - return "L-BFGS-B (SciPy)" - def human_readable_extra(self): - return "maxfun = " + str(self.maxfun) + ", m = " + str(self.m) + ", factr = " + str(self.factr) + ", pgtol = " +str(self.pgtol) + ", epsilon = " +str(self.epsilon) + + """ + Wrapper around SciPy's fmin_l_bfgs_b optimiser (uses L-BFGS-B algorithm) + """ + + def __init__( + self, + maxfun=1, + m=10, + factr=10000000.0, + pgtol=1e-05, + epsilon=1e-08, + screen_output=False): + """ + Constructs a L-BFGS-B algorithm (SciPy) + + NOTE: gradient is numerically approximated + + USAGE: algorithm.scipy_l_bfgs_b(maxfun = 15000, m = 10, factr = 10000000.0, pgtol = 1e-05, epsilon = 1e-08, screen_output = False): + + * maxfun: maximum number of function evaluations + * m: the maximum number of variable metric corrections + used to define the limited memory matrix. (the limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it). + * factr: The iteration stops when + (f{k} - f{k+1}) / max{\| f{k} \| , \| f{k+1} \|,1} <= factr*epsmch + where epsmch is the machine precision, which is automatically + generated by the code. Typical values for factr: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. + * pgtol: The iteration will stop when + max{\| proj g{i} \| i = 1, ..., n} <= pgtol + where proj g{i} is the ith component of the projected gradient. + * epsilon: step size used when approx_grad is true, for numerically + calculating the gradient + * screen_output: Set to True to print iterations + """ + _scipy_base.__init__(self, 'fmin_l_bfgs_b', False) + self.maxfun = maxfun + self.m = m + self.factr = factr + self.pgtol = pgtol + self.epsilon = epsilon + self.screen_output = screen_output + + def evolve(self, pop): + from numpy import concatenate, array + prob = pop.problem + self._problem_checks(prob) + if len(pop) == 0: + return pop + _, x0, x0_comb = self._starting_params(pop) + # Extract the a list of tuples representing the bounds. 
+ prob_bounds = [(prob.lb[i], prob.ub[i]) + for i in range(0, prob.dimension - prob.i_dimension)] + if self.screen_output: + iprn = 1 + else: + iprn = -1 + retval = self.solver( + lambda x: array( + prob.objfun( + concatenate( + (x, + x0_comb))), + dtype=float), + x0, + bounds=prob_bounds, + approx_grad=True, + iprint=iprn, + pgtol=self.pgtol, + maxfun=self.maxfun, + factr=self.factr, + m=self.m, + epsilon=self.epsilon) + new_chromosome = list(retval[0]) + list(x0_comb) + pop.set_x( + pop.get_best_idx(), + self._check_new_chromosome( + new_chromosome, + prob)) + return pop + + def get_name(self): + return "L-BFGS-B (SciPy)" + + def human_readable_extra(self): + return "maxfun = " + str(self.maxfun) + ", m = " + str(self.m) + ", factr = " + \ + str(self.factr) + ", pgtol = " + str(self.pgtol) + ", epsilon = " + str(self.epsilon) + class scipy_slsqp(_scipy_base): - """ - Wrapper around SciPy's slsqp optimiser. - """ - def __init__(self,max_iter = 100,acc = 1E-8,epsilon = 1.4901161193847656e-08, screen_output = False): - """ - Constructs a Sequential Least SQuares Programming algorithm - - NOTE: gradient is numerically approximated - - USAGE: algorithm.scipy_slsqp(max_iter = 100,acc = 1E-6,epsilon = 1.49e-08, screen_output = False)) - - - * max_iter: The maximum number of iterations. - * acc: Requested accuracy. - * epsilon: The step size for finite-difference derivative estimates. - * screen_output: Set to True to print iterations - """ - - _scipy_base.__init__(self,'fmin_slsqp',True) - self.max_iter = max_iter - self.acc = acc - self.epsilon = epsilon - self.screen_output = screen_output - def get_name(self): - return 'Sequential Least SQuares Programming (SciPy)' - def evolve(self,pop): - from numpy import concatenate, array - prob = pop.problem - self._problem_checks(prob) - # If population is empty, just return input population. - if len(pop) == 0: - return pop - # Get starting params. - n_ec, x0, x0_comb = self._starting_params(pop) - # Extract the a list of tuples representing the bounds. - prob_bounds = [(prob.lb[i],prob.ub[i]) for i in range(0,prob.dimension - prob.i_dimension)] - if self.screen_output: - iprn = 2 - else: - iprn = 0 - # Run the optimisation. - retval = self.solver(lambda x: prob.objfun(concatenate((x, x0_comb)))[0],x0,f_eqcons = lambda x: array(prob.compute_constraints(concatenate((x, x0_comb)))[0:n_ec],dtype=float), - f_ieqcons = lambda x: array(prob.compute_constraints(concatenate((x, x0_comb)))[n_ec:],dtype=float) * -1,bounds = prob_bounds,iprint = iprn, - iter = self.max_iter,acc = self.acc, epsilon = self.epsilon) - # Set the individual's chromosome in the population and return. Conserve the integer part from the - # original individual. - new_chromosome = list(retval) + list(x0_comb) - pop.set_x(pop.get_best_idx(),self._check_new_chromosome(new_chromosome,prob)) - return pop - def human_readable_extra(self): - return "maxiter = " + str(self.max_iter) + ", acc = " + str(self.acc) + ", epsilon = " + str(self.epsilon) + + """ + Wrapper around SciPy's slsqp optimiser. + """ + + def __init__( + self, + max_iter=100, + acc=1E-8, + epsilon=1.4901161193847656e-08, + screen_output=False): + """ + Constructs a Sequential Least SQuares Programming algorithm + + NOTE: gradient is numerically approximated + + USAGE: algorithm.scipy_slsqp(max_iter = 100,acc = 1E-6,epsilon = 1.49e-08, screen_output = False)) + + + * max_iter: The maximum number of iterations. + * acc: Requested accuracy. + * epsilon: The step size for finite-difference derivative estimates. 
+ * screen_output: Set to True to print iterations + """ + + _scipy_base.__init__(self, 'fmin_slsqp', True) + self.max_iter = max_iter + self.acc = acc + self.epsilon = epsilon + self.screen_output = screen_output + + def get_name(self): + return 'Sequential Least SQuares Programming (SciPy)' + + def evolve(self, pop): + from numpy import concatenate, array + prob = pop.problem + self._problem_checks(prob) + # If population is empty, just return input population. + if len(pop) == 0: + return pop + # Get starting params. + n_ec, x0, x0_comb = self._starting_params(pop) + # Extract the a list of tuples representing the bounds. + prob_bounds = [(prob.lb[i], prob.ub[i]) + for i in range(0, prob.dimension - prob.i_dimension)] + if self.screen_output: + iprn = 2 + else: + iprn = 0 + # Run the optimisation. + retval = self.solver( + lambda x: prob.objfun( + concatenate( + (x, x0_comb)))[0], x0, f_eqcons=lambda x: array( + prob.compute_constraints( + concatenate( + (x, x0_comb)))[ + 0:n_ec], dtype=float), f_ieqcons=lambda x: array( + prob.compute_constraints( + concatenate( + (x, x0_comb)))[ + n_ec:], dtype=float) * -1, bounds=prob_bounds, iprint=iprn, iter=self.max_iter, acc=self.acc, epsilon=self.epsilon) + # Set the individual's chromosome in the population and return. Conserve the integer part from the + # original individual. + new_chromosome = list(retval) + list(x0_comb) + pop.set_x( + pop.get_best_idx(), + self._check_new_chromosome( + new_chromosome, + prob)) + return pop + + def human_readable_extra(self): + return "maxiter = " + \ + str(self.max_iter) + ", acc = " + str(self.acc) + ", epsilon = " + str(self.epsilon) + class scipy_tnc(_scipy_base): - """ - Wrapper around SciPy's tnc optimiser. - """ - def __init__(self, maxfun = 15000, xtol = -1, ftol = -1, pgtol = 1e-05, epsilon = 1e-08, screen_output = False): - """ - Constructs a Truncated Newton Method algorithm (SciPy) - - NOTE: gradient is numerically approximated - - USAGE: algorithm.scipy_tnc(maxfun = 1, xtol = -1, ftol = -1, pgtol = 1e-05, epsilon = 1e-08, screen_output = False) - - * maxfun: Maximum number of function evaluation. - * xtol: Precision goal for the value of x in the stopping criterion - (after applying x scaling factors). If xtol < 0.0, xtol is set to - sqrt(machine_precision). Defaults to -1. - * ftol: Precision goal for the value of f in the stoping criterion. - If ftol < 0.0, ftol is set to 0.0 defaults to -1. - * pgtol: Precision goal for the value of the projected gradient in the - stopping criterion (after applying x scaling factors). If pgtol - < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). - Setting it to 0.0 is not recommended. Defaults to -1. - * epsilon: The stepsize in a finite difference approximation for the objfun - * screen_output: Set to True to print iterations - """ - _scipy_base.__init__(self,'fmin_tnc',False) - self.maxfun = maxfun - self.xtol = xtol - self.ftol = ftol - self.pgtol = pgtol - self.epsilon = epsilon - self.screen_output = screen_output - def evolve(self,pop): - from numpy import concatenate, array - prob = pop.problem - self._problem_checks(prob) - if len(pop) == 0: - return pop - _, x0, x0_comb = self._starting_params(pop) - # Extract the a list of tuples representing the bounds. 
- prob_bounds = [(prob.lb[i],prob.ub[i]) for i in range(0,prob.dimension - prob.i_dimension)] - if self.screen_output: - msg = 15 - else: - msg = 0 - retval = self.solver(lambda x: array(prob.objfun(concatenate((x,x0_comb))),dtype=float),x0,bounds = prob_bounds,approx_grad = True, messages = msg, - maxfun = self.maxfun, xtol = self.xtol, ftol = self.ftol, pgtol = self.pgtol, epsilon = self.epsilon) - new_chromosome = list(retval[0]) + list(x0_comb) - pop.set_x(pop.get_best_idx(),self._check_new_chromosome(new_chromosome,prob)) - return pop - def get_name(self): - return "Truncated Newton Method (SciPy)" - def human_readable_extra(self): - return "maxfun = " + str(self.maxfun) + ", xtol = " + str(self.xtol) + ", ftol = " + str(self.ftol) + ", pgtol = " +str(self.pgtol) + ", epsilon = " +str(self.epsilon) + + """ + Wrapper around SciPy's tnc optimiser. + """ + + def __init__( + self, + maxfun=15000, + xtol=-1, + ftol=-1, + pgtol=1e-05, + epsilon=1e-08, + screen_output=False): + """ + Constructs a Truncated Newton Method algorithm (SciPy) + + NOTE: gradient is numerically approximated + + USAGE: algorithm.scipy_tnc(maxfun = 1, xtol = -1, ftol = -1, pgtol = 1e-05, epsilon = 1e-08, screen_output = False) + + * maxfun: Maximum number of function evaluation. + * xtol: Precision goal for the value of x in the stopping criterion + (after applying x scaling factors). If xtol < 0.0, xtol is set to + sqrt(machine_precision). Defaults to -1. + * ftol: Precision goal for the value of f in the stoping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + * pgtol: Precision goal for the value of the projected gradient in the + stopping criterion (after applying x scaling factors). If pgtol + < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + * epsilon: The stepsize in a finite difference approximation for the objfun + * screen_output: Set to True to print iterations + """ + _scipy_base.__init__(self, 'fmin_tnc', False) + self.maxfun = maxfun + self.xtol = xtol + self.ftol = ftol + self.pgtol = pgtol + self.epsilon = epsilon + self.screen_output = screen_output + + def evolve(self, pop): + from numpy import concatenate, array + prob = pop.problem + self._problem_checks(prob) + if len(pop) == 0: + return pop + _, x0, x0_comb = self._starting_params(pop) + # Extract the a list of tuples representing the bounds. + prob_bounds = [(prob.lb[i], prob.ub[i]) + for i in range(0, prob.dimension - prob.i_dimension)] + if self.screen_output: + msg = 15 + else: + msg = 0 + retval = self.solver( + lambda x: array( + prob.objfun( + concatenate( + (x, + x0_comb))), + dtype=float), + x0, + bounds=prob_bounds, + approx_grad=True, + messages=msg, + maxfun=self.maxfun, + xtol=self.xtol, + ftol=self.ftol, + pgtol=self.pgtol, + epsilon=self.epsilon) + new_chromosome = list(retval[0]) + list(x0_comb) + pop.set_x( + pop.get_best_idx(), + self._check_new_chromosome( + new_chromosome, + prob)) + return pop + + def get_name(self): + return "Truncated Newton Method (SciPy)" + + def human_readable_extra(self): + return "maxfun = " + str(self.maxfun) + ", xtol = " + str(self.xtol) + ", ftol = " + \ + str(self.ftol) + ", pgtol = " + str(self.pgtol) + ", epsilon = " + str(self.epsilon) class scipy_cobyla(_scipy_base): - """ - Wrapper around SciPy's cobyla optimiser. 
-    """
-    def __init__(self,max_fun = 1,rho_end = 1E-5,screen_output = False):
-        """
-        Constructs a Constrained Optimization BY Linear Approximation (COBYLA) algorithm (SciPy)
-
-        NOTE: equality constraints are transformed into two inequality constraints automatically
-
-        USAGE: algorithm.scipy_cobyla(max_fun = 1,rho_end = 1E-5,screen_output = False)
-
-        * maxfun: Maximum number of function evaluations.
-        * rhoend: Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region.
-        * screen_output: Set to True to print iterations
-        """
-        _scipy_base.__init__(self,'fmin_cobyla',True)
-        self.max_fun = max_fun
-        self.rho_end = rho_end
-        self.screen_output = screen_output
-    def evolve(self,pop):
-        from numpy import concatenate, array
-        prob = pop.problem
-        self._problem_checks(prob)
-        if len(pop) == 0:
-            return pop
-        nec, x0, x0_comb = self._starting_params(pop)
-        # We need to build a vector of functions for the constraints. COBYLA uses inequality constraints with >= 0.
-        ub = prob.ub
-        lb = prob.lb
-        f_cons = []
-        for i in range(0,nec):
-            # Each eq. constraint is converted into two ineq. constraints with different signs,
-            f_cons.append(lambda x, i = i: prob.compute_constraints(concatenate((x, x0_comb)))[i])
-            f_cons.append(lambda x, i = i: -prob.compute_constraints(concatenate((x, x0_comb)))[i])
-        for i in range(nec,prob.c_dimension):
-            # Ineq. constraints.
-            f_cons.append(lambda x, i = i: -prob.compute_constraints(concatenate((x, x0_comb)))[i])
-        for i in range(0,prob.dimension - prob.i_dimension):
-            # Box bounds implemented as inequality constraints.
-            f_cons.append(lambda x, i = i: x[i] - lb[i])
-            f_cons.append(lambda x, i = i: ub[i] - x[i])
-        if self.screen_output:
-            iprn = 1
-        else:
-            iprn = 0
-        retval = self.solver(lambda x: array(prob.objfun(concatenate((x,x0_comb))),dtype=float),x0,cons = f_cons,iprint = iprn, maxfun = self.maxfun, rhoend = self.rhoend)
-        new_chromosome = list(retval) + list(x0_comb)
-        pop.set_x(pop.get_best_idx(),self._check_new_chromosome(new_chromosome,prob))
-        return pop
-    def get_name(self):
-        return "Constrained Optimization BY Linear Approximation (SciPy)"
-    def human_readable_extra(self):
-        return "maxfun = " + str(self.maxfun) + ", rhoend = " + str(self.rhoend)
-
-
-#This algorithm suck at the moment and thus I will not include it
-
-
-#class scipy_anneal(_scipy_base):
+
+    """
+    Wrapper around SciPy's cobyla optimiser.
+    """
+
+    def __init__(self, max_fun=1, rho_end=1E-5, screen_output=False):
+        """
+        Constructs a Constrained Optimization BY Linear Approximation (COBYLA) algorithm (SciPy)
+
+        NOTE: equality constraints are transformed into two inequality constraints automatically
+
+        USAGE: algorithm.scipy_cobyla(max_fun = 1, rho_end = 1E-5, screen_output = False)
+
+        * max_fun: Maximum number of function evaluations.
+        * rho_end: Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region.
+        * screen_output: Set to True to print iterations
+        """
+        _scipy_base.__init__(self, 'fmin_cobyla', True)
+        self.max_fun = max_fun
+        self.rho_end = rho_end
+        self.screen_output = screen_output
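+
+    # An illustrative sketch (with a hypothetical constraint h; not part of
+    # the PyGMO sources) of the NOTE above: COBYLA only accepts inequality
+    # constraints of the form g(x) >= 0, so an equality constraint h(x) = 0
+    # is passed as the pair h(x) >= 0 and -h(x) >= 0, which can both hold
+    # only when h(x) = 0. For h(x) = x[0] + x[1] - 1 this would read:
+    #
+    #   f_cons = [lambda x: x[0] + x[1] - 1,     # h(x) >= 0
+    #             lambda x: -(x[0] + x[1] - 1)]  # -h(x) >= 0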
+
+    def evolve(self, pop):
+        from numpy import concatenate, array
+        prob = pop.problem
+        self._problem_checks(prob)
+        if len(pop) == 0:
+            return pop
+        nec, x0, x0_comb = self._starting_params(pop)
+        # We need to build a vector of functions for the constraints. COBYLA
+        # uses inequality constraints with >= 0.
+        ub = prob.ub
+        lb = prob.lb
+        f_cons = []
+        for i in range(0, nec):
+            # Each eq. constraint is converted into two ineq. constraints with
+            # different signs,
+            f_cons.append(
+                lambda x, i=i: prob.compute_constraints(
+                    concatenate((x, x0_comb)))[i])
+            f_cons.append(
+                lambda x, i=i: -prob.compute_constraints(
+                    concatenate((x, x0_comb)))[i])
+        for i in range(nec, prob.c_dimension):
+            # Ineq. constraints.
+            f_cons.append(
+                lambda x, i=i: -prob.compute_constraints(
+                    concatenate((x, x0_comb)))[i])
+        for i in range(0, prob.dimension - prob.i_dimension):
+            # Box bounds implemented as inequality constraints.
+            f_cons.append(lambda x, i=i: x[i] - lb[i])
+            f_cons.append(lambda x, i=i: ub[i] - x[i])
+        if self.screen_output:
+            iprn = 1
+        else:
+            iprn = 0
+        retval = self.solver(
+            lambda x: array(
+                prob.objfun(
+                    concatenate((x, x0_comb))),
+                dtype=float),
+            x0,
+            cons=f_cons,
+            iprint=iprn,
+            maxfun=self.max_fun,
+            rhoend=self.rho_end)
+        new_chromosome = list(retval) + list(x0_comb)
+        pop.set_x(
+            pop.get_best_idx(),
+            self._check_new_chromosome(
+                new_chromosome,
+                prob))
+        return pop
+
+    def get_name(self):
+        return "Constrained Optimization BY Linear Approximation (SciPy)"
+
+    def human_readable_extra(self):
+        return "maxfun = " + \
+            str(self.max_fun) + ", rhoend = " + str(self.rho_end)
+
+
+# This algorithm sucks at the moment and thus I will not include it
+
+
+# class scipy_anneal(_scipy_base):
 #    """
 #    Wrapper around SciPy's anneal optimiser.
 #    """
-#    def __init__(self, schedule = 'fast', screen_output = False, T0 = None, Tf = 9.9999999999999998e-13, maxfun = 1000000, maxaccept = None, maxiter = 100000, boltzmann #= 1.0, learn_rate = 0.5, feps = 9.9999999999999995e-07, dwell = 5):
+# def __init__(self, schedule = 'fast', screen_output = False, T0 = None, Tf = 9.9999999999999998e-13, maxfun = 1000000, maxaccept = None, maxiter = 100000, boltzmann #= 1.0, learn_rate = 0.5, feps = 9.9999999999999995e-07, dwell = 5):
 #        """
 #        Constructs a Simulate Annealing algorithm
 #
-#        USAGE: algorithm.scipy_anneal(maxiter = 100000, T0 = None, Tf = 9.9999999999999998e-13, schedule = 'fast',
+# USAGE: algorithm.scipy_anneal(maxiter = 100000, T0 = None, Tf = 9.9999999999999998e-13, schedule = 'fast',
 #                maxfun = 1000000, maxaccept = None, boltzmann = 1.0, learn_rate = 0.5,
 #                feps = 9.9999999999999995e-07, dwell = 50, screen_output = False)
 #
 #        NOTE: it is not guaranteed that the objective function will be only called with
-#        chrmosomes within the bounds
+# chromosomes within the bounds
 #
 #        * maxiter: Maximum cooling iterations
 #        * T0: Starting temperature
@@ -352,18 +528,18 @@ def human_readable_extra(self):
 #        from numpy import concatenate, array
 #        prob = pop.problem
 #        self._problem_checks(prob)
-#        # If population is empty, just return input population.
+# If population is empty, just return input population.
 #        if len(pop) == 0:
 #            return pop
-#        # Get starting params.
+# Get starting params.
 #        n_ec, x0, x0_comb = self._starting_params(pop)
-#        # Run the optimisation.
+# Run the optimisation.
 #        retval = self.solver(lambda x: prob.objfun(concatenate((x, x0_comb)))[0],x0,lower = array(prob.lb,dtype=float),upper = array(prob.ub,dtype=float)
 #            , full_output = int(self.screen_output), schedule = self.schedule, T0 = self.T0, Tf = self.Tf, maxeval = self.maxfun
 #            , maxaccept = self.maxaccept, maxiter = self.maxiter, boltzmann = self.boltzmann, learn_rate = self.learn_rate
 #            , feps = self.feps, dwell = self.dwell)
-#        # Set the individual's chromosome in the population and return. 
Conserve the integer part from the -# # original individual. +# Set the individual's chromosome in the population and return. Conserve the integer part from the +# original individual. # new_chromosome = list(retval[0]) + list(x0_comb) # pop.set_x(0,self._check_new_chromosome(new_chromosome,prob)) # return pop @@ -373,7 +549,3 @@ def human_readable_extra(self): # return ("maxiter = " + str(self.maxiter) + ", T0 = " + str(self.T0) + ", Tf = " + str(self.Tf) + ", schedule = " + self.schedule # + ", maxfun = " + str(self.maxfun) + ", maxaccept = " + str(self.maxaccept) + ", boltzmann = " + str(self.boltzmann) # + ", learn_rate = " + str(self.learn_rate) + ", feps = " + str(self.feps) + ", dwell = " + str(self.dwell) - - - - diff --git a/PyGMO/algorithm/algorithm.cpp b/PyGMO/algorithm/algorithm.cpp index 0cf54b9a..032e5ba5 100644 --- a/PyGMO/algorithm/algorithm.cpp +++ b/PyGMO/algorithm/algorithm.cpp @@ -348,6 +348,20 @@ BOOST_PYTHON_MODULE(_algorithm) { " - nw: number of weights" ); + // MOEA/D + enum_("_weight_generation_moead") + .value("RANDOM", algorithm::moead::RANDOM) + .value("GRID", algorithm::moead::GRID) + .value("LOW_DISCREPANCY", algorithm::moead::LOW_DISCREPANCY); + algorithm_wrapper("moead", "MOEA/D-DE") + .def(init >()) + .def("generate_weights", &algorithm::moead::generate_weights, + "Generates the weights of the decomposed problem\n\n" + " USAGE:: w = moead.generate_weights(nf,nw)\n" + " - nf: fitness dimension\n" + " - nw: number of weights" + ); + // SMS-EMOA algorithm_wrapper("sms_emoa", "The SMS-EMOA algorithm") .def(init >()) diff --git a/PyGMO/algorithm/python_base.h b/PyGMO/algorithm/python_base.h index 27221ccc..9afc99ee 100644 --- a/PyGMO/algorithm/python_base.h +++ b/PyGMO/algorithm/python_base.h @@ -106,6 +106,6 @@ class __PAGMO_VISIBLE python_base: public base, public boost::python::wrapperob_type == 0 - || obj_ptr->ob_type->ob_type == 0 - || obj_ptr->ob_type->ob_type->tp_name == 0 + || Py_TYPE(obj_ptr->ob_type) == 0 + || Py_TYPE(obj_ptr->ob_type)->tp_name == 0 || std::strcmp( - obj_ptr->ob_type->ob_type->tp_name, + Py_TYPE(obj_ptr->ob_type)->tp_name, "Boost.Python.class") != 0) && PyObject_HasAttrString(obj_ptr, "__len__") && PyObject_HasAttrString(obj_ptr, "__getitem__")))) return 0; diff --git a/PyGMO/core/__init__.py b/PyGMO/core/__init__.py index 8a102721..3e6b6072 100644 --- a/PyGMO/core/__init__.py +++ b/PyGMO/core/__init__.py @@ -1,22 +1,36 @@ # -*- coding: utf-8 -*- -from _core import * +from PyGMO.core._core import * import threading as _threading import signal as _signal import os as _os __doc__ = 'PyGMO core module.' -__all__ = ['archipelago','base_island','champion','distribution_type','individual','ipy_island','island','local_island','migration_direction','population','py_island'] +__all__ = [ + 'archipelago', + 'base_island', + 'champion', + 'distribution_type', + 'individual', + 'ipy_island', + 'island', + 'local_island', + 'migration_direction', + 'population', + 'py_island'] _orig_signal = _signal.getsignal(_signal.SIGINT) _main_pid = _os.getpid() -# Alternative signal handler which ignores sigint if called from a child process. -def _sigint_handler(signum,frame): - import os - if os.getpid() == _main_pid: - _orig_signal(signum,frame) +# Alternative signal handler which ignores sigint if called from a child +# process. 
-_signal.signal(_signal.SIGINT,_sigint_handler) + +def _sigint_handler(signum, frame): + import os + if os.getpid() == _main_pid: + _orig_signal(signum, frame) + +_signal.signal(_signal.SIGINT, _sigint_handler) # Global lock used when starting processes. _process_lock = _threading.Lock() @@ -24,433 +38,560 @@ def _sigint_handler(signum,frame): # Raw C++ base island class. _base_island = _core._base_island + class base_island(_core._base_island): - def __init__(self,*args): - if len(args) == 0: - raise ValueError("Cannot initialise base island without parameters for the constructor.") - _core._base_island.__init__(self,*args) - def get_name(self): - return str(type(self)) - def __get_deepcopy__(self): - from copy import deepcopy - return deepcopy(self) - -def _generic_island_ctor(self,*args,**kwargs): - """Unnamed arguments: - - #. algorithm - #. problem or population - #. number of individuals (optional and valid only if the second argument is a problem, defaults to 0 if not specified) - - Keyword arguments: - - * *migr_prob* -- migration probability (defaults to 1) - * *s_policy* -- migration selection policy (defaults to 'best selection' policy) - * *r_policy* -- migration replacement policy (defaults to 'fair replacement' policy) - - """ - from PyGMO.algorithm._algorithm import _base as _base_algorithm - from PyGMO.algorithm import base as base_algorithm - from PyGMO.problem._problem import _base as _base_problem - from PyGMO.problem._problem import _base_stochastic as _base_problem_stochastic - from PyGMO.problem import base as base_problem - from PyGMO.problem import base_stochastic as base_problem_stochastic - from PyGMO.migration._migration import best_s_policy, fair_r_policy, _base_s_policy, _base_r_policy - - if len(args) < 2 or len(args) > 3: - raise ValueError("Unnamed arguments list must have either 2 or three elements, but %d elements were found instead." % (len(args),)) - if not isinstance(args[0],_base_algorithm): - raise TypeError("The first unnamed argument must be an algorithm.") - ctor_args = [args[0]] - if isinstance(args[1],_base_problem) or isinstance(args[1],_base_problem_stochastic): - ctor_args.append(args[1]) - if len(args) == 3: - if not isinstance(args[2],int): - raise TypeError("Please provide an integer for the number of individuals in the island.") - ctor_args.append(args[2]) - else: - ctor_args.append(0) - elif isinstance(args[1],population): - if len(args) == 3: - raise ValueError("When the second unnamed argument is a population, there cannot be a third unnamed argument.") - ctor_args.append(args[1]) - else: - raise TypeError("The second unnamed argument must be either a problem or a population.") - - if 'migr_prob' in kwargs: - ctor_args.append(kwargs['migr_prob']) - else: - ctor_args.append(1.) 
- if not isinstance(ctor_args[-1],float): - raise TypeError("Migration probability must be a float.") - - if 's_policy' in kwargs: - ctor_args.append(kwargs['s_policy']) - else: - ctor_args.append(best_s_policy()) - if not isinstance(ctor_args[-1],_base_s_policy): - raise TypeError("s_policy must be a migration selection policy.") - - if 'r_policy' in kwargs: - ctor_args.append(kwargs['r_policy']) - else: - ctor_args.append(fair_r_policy()) - if not isinstance(ctor_args[-1],_base_r_policy): - raise TypeError("r_policy must be a migration replacement policy.") - - if isinstance(self,base_island): - super(type(self),self).__init__(*ctor_args) - elif isinstance(self,_base_island): - self.__original_init__(*ctor_args) - else: - assert(self is None) - n_pythonic_items = 0 - if isinstance(args[0],base_algorithm): - n_pythonic_items += 1 - if isinstance(args[1],base_problem) or isinstance(args[1],base_problem_stochastic): - n_pythonic_items += 1 - elif isinstance(args[1],population) and (isinstance(args[1].problem,base_problem) or isinstance(args[1],base_problem_stochastic)): - n_pythonic_items += 1 - if n_pythonic_items > 0: - return py_island(*args,**kwargs) - else: - return local_island(*args,**kwargs) + + def __init__(self, *args): + if len(args) == 0: + raise ValueError( + "Cannot initialise base island without parameters for the constructor.") + _core._base_island.__init__(self, *args) + + def get_name(self): + return str(type(self)) + + def __get_deepcopy__(self): + from copy import deepcopy + return deepcopy(self) + + +def _generic_island_ctor(self, *args, **kwargs): + """Unnamed arguments: + + #. algorithm + #. problem or population + #. number of individuals (optional and valid only if the second argument is a problem, defaults to 0 if not specified) + + Keyword arguments: + + * *migr_prob* -- migration probability (defaults to 1) + * *s_policy* -- migration selection policy (defaults to 'best selection' policy) + * *r_policy* -- migration replacement policy (defaults to 'fair replacement' policy) + + """ + from PyGMO.algorithm._algorithm import _base as _base_algorithm + from PyGMO.algorithm import base as base_algorithm + from PyGMO.problem._problem import _base as _base_problem + from PyGMO.problem._problem import _base_stochastic as _base_problem_stochastic + from PyGMO.problem import base as base_problem + from PyGMO.problem import base_stochastic as base_problem_stochastic + from PyGMO.migration._migration import best_s_policy, fair_r_policy, _base_s_policy, _base_r_policy + + if len(args) < 2 or len(args) > 3: + raise ValueError( + "Unnamed arguments list must have either 2 or three elements, but %d elements were found instead." 
% + (len(args),)) + if not isinstance(args[0], _base_algorithm): + raise TypeError("The first unnamed argument must be an algorithm.") + ctor_args = [args[0]] + if isinstance(args[1], _base_problem) or isinstance(args[1], _base_problem_stochastic): + ctor_args.append(args[1]) + if len(args) == 3: + if not isinstance(args[2], int): + raise TypeError( + "Please provide an integer for the number of individuals in the island.") + ctor_args.append(args[2]) + else: + ctor_args.append(0) + elif isinstance(args[1], population): + if len(args) == 3: + raise ValueError( + "When the second unnamed argument is a population, there cannot be a third unnamed argument.") + ctor_args.append(args[1]) + else: + raise TypeError( + "The second unnamed argument must be either a problem or a population.") + + if 'migr_prob' in kwargs: + ctor_args.append(kwargs['migr_prob']) + else: + ctor_args.append(1.) + if not isinstance(ctor_args[-1], float): + raise TypeError("Migration probability must be a float.") + + if 's_policy' in kwargs: + ctor_args.append(kwargs['s_policy']) + else: + ctor_args.append(best_s_policy()) + if not isinstance(ctor_args[-1], _base_s_policy): + raise TypeError("s_policy must be a migration selection policy.") + + if 'r_policy' in kwargs: + ctor_args.append(kwargs['r_policy']) + else: + ctor_args.append(fair_r_policy()) + if not isinstance(ctor_args[-1], _base_r_policy): + raise TypeError("r_policy must be a migration replacement policy.") + + if isinstance(self, base_island): + super(type(self), self).__init__(*ctor_args) + elif isinstance(self, _base_island): + self.__original_init__(*ctor_args) + else: + assert(self is None) + n_pythonic_items = 0 + if isinstance(args[0], base_algorithm): + n_pythonic_items += 1 + if isinstance(args[1], base_problem) or isinstance(args[1], base_problem_stochastic): + n_pythonic_items += 1 + elif isinstance(args[1], population) and (isinstance(args[1].problem, base_problem) or isinstance(args[1], base_problem_stochastic)): + n_pythonic_items += 1 + if n_pythonic_items > 0: + return py_island(*args, **kwargs) + else: + return local_island(*args, **kwargs) local_island.__original_init__ = local_island.__init__ local_island.__init__ = _generic_island_ctor # This is the function that will be called by the separate process # spawned from py_island. -def _process_target(q,a,p): - try: - tmp = a.evolve(p) - q.put(tmp) - except BaseException as e: - q.put(e) + + +def _process_target(q, a, p): + try: + tmp = a.evolve(p) + q.put(tmp) + except BaseException as e: + q.put(e) + class py_island(base_island): - """Python island. - - This island will launch evolutions using the multiprocessing module, available since Python 2.6. - Each evolution is transparently dispatched to a Python interpreter in a separate process. - - """ - __init__ = _generic_island_ctor - def _perform_evolution(self,algo,pop): - try: - import multiprocessing as mp - q = mp.Queue() - # Apparently creating/starting processes is _not_ thread safe: - # http://bugs.python.org/issue1731717 - # http://stackoverflow.com/questions/1359795/error-while-using-multiprocessing-module-in-a-python-daemon - # Protect with a global lock. 
- with _process_lock: - process = mp.Process(target = _process_target, args = (q,algo,pop)) - process.start() - retval = q.get() - with _process_lock: - process.join() - if isinstance(retval,BaseException): - raise retval - return retval - except BaseException as e: - print('Exception caught during evolution:') - print(e) - raise RuntimeError() - def get_name(self): - return "Python multiprocessing island" + + """Python island. + + This island will launch evolutions using the multiprocessing module, available since Python 2.6. + Each evolution is transparently dispatched to a Python interpreter in a separate process. + + """ + __init__ = _generic_island_ctor + + def _perform_evolution(self, algo, pop): + try: + import multiprocessing as mp + q = mp.Queue() + # Apparently creating/starting processes is _not_ thread safe: + # http://bugs.python.org/issue1731717 + # http://stackoverflow.com/questions/1359795/error-while-using-multiprocessing-module-in-a-python-daemon + # Protect with a global lock. + with _process_lock: + process = mp.Process( + target=_process_target, args=(q, algo, pop)) + process.start() + retval = q.get() + with _process_lock: + process.join() + if isinstance(retval, BaseException): + raise retval + return retval + except BaseException as e: + print('Exception caught during evolution:') + print(e) + raise RuntimeError() + + def get_name(self): + return "Python multiprocessing island" # This is the function that will be called by the task client # in ipy_island. -def _maptask_target(a,p): - try: - return a.evolve(p) - except BaseException as e: - return e -class ipy_island(base_island): - """Parallel IPython island. - - This island will launch evolutions using IPython's MapTask interface. The evolution will be dispatched - to IPython engines that, depending on the configuration of IPython/ipcluster, can reside either on the - local machine or on other remote machines. - - See: http://ipython.scipy.org/doc/stable/html/parallel/index.html - - """ - # NOTE: when using an IPython island, on quitting IPython there might be a warning message - # reporting an exception being ignored. This seems to be a problem in the foolscap library: - # http://foolscap.lothar.com/trac/ticket/147 - # Hopefully it will be fixed in the next versions of the library. - __init__ = _generic_island_ctor - def _perform_evolution(self,algo,pop): - try: - from IPython.kernel.client import TaskClient, MapTask - # Create task client. - tc = TaskClient() - # Create the task. - mt = MapTask(_maptask_target,args = (algo,pop)) - # Run the task. - task_id = tc.run(mt) - # Get retval. 
- retval = tc.get_task_result(task_id,block = True) - if isinstance(retval,BaseException): - raise retval - return retval - except BaseException as e: - print('Exception caught during evolution:') - print(e) - raise RuntimeError() - def get_name(self): - return "Parallel IPython island" - -def island(*args,**kwargs): - return _generic_island_ctor(None,*args,**kwargs) - -island.__doc__ = '\n'.join(['Island factory function.\n\nThis function will return an instance of an island object\nbuilt according to the following rule: '+ - 'if the arguments include\neither a pythonic problem or a pythonic algorithm, then an instance\nof :class:`py_island` will be returned; '+ - 'otherwise, an instance of\n:class:`local_island` will be returned.'] + [s.replace('\t','') for s in _generic_island_ctor.__doc__.split('\n')]) - -del s - -def _get_island_list(): - from PyGMO import core - names = filter(lambda n: not n.startswith('_') and not n.startswith('base') and n.endswith('_island'),dir(core)) - try: - from IPython.kernel.client import TaskClient, MapTask - except ImportError: - names = filter(lambda n: n != 'ipy_island',names) - return [core.__dict__[n] for n in names] - -def _generic_archi_ctor(self,*args,**kwargs): - """ - Unnamed arguments (optional): - - #. algorithm - #. problem - #. number of islands - #. number individual in the population - - Keyword arguments: - - * *topology* -- migration topology (defaults to unconnected) - * *distribution_type* -- distribution_type (defaults to distribution_type.point_to_point) - * *migration_direction* -- migration_direction (defaults to migration_direction.destination) - """ - - from PyGMO import topology, algorithm,problem - from difflib import get_close_matches - - if not((len(args)==4) or (len(args)==0)): - raise ValueError("Unnamed arguments list, when present, must be of length 4, but %d elements were found instead" % (len(args),)) - - #Append everything in the same list of constructor arguments - ctor_args = [] - for i in args: - ctor_args.append(i) - - #Pop all known keywords out of kwargs and add a default value if not provided - ctor_args.append(kwargs.pop('topology', topology.unconnected())) #unconnected is default - ctor_args.append(kwargs.pop('distribution_type', distribution_type.point_to_point)) #point-to-point is default - ctor_args.append(kwargs.pop('migration_direction', migration_direction.destination)) #destination is default - - #Check for unknown keywords - kwlist = ['topology', 'distribution_type', 'migration_direction'] - if kwargs: - s = "The following unknown keyworded argument was passed to the construtor: " - for kw in kwargs: - s += kw - spam = get_close_matches(kw, kwlist) - if spam: - s += " (Did you mean %s?), " % spam[0] - else: - s += ", " - - raise ValueError(s[:-2]) - - #Constructs an empty archipelago with no islands using the C++ constructor - self.__original_init__(*ctor_args[-3:]) - - #We now push back the correct island type if required - if (len(args))==4: - if not isinstance(args[0],algorithm._base): - raise TypeError("The first unnamed argument must be an algorithm") - if not (isinstance(args[1],problem._base) or isinstance(args[1],problem._base_stochastic)): - raise TypeError("The second unnamed argument must be a problem") - if not isinstance(args[2],int): - raise TypeError("The third unnamed argument must be an integer (i.e. number of islands)") - if not isinstance(args[3],int): - raise TypeError("The fourth unnamed argument must be an integer (i.e. 
population size)") - for n in range(args[2]): - self.push_back(island(args[0],args[1],args[3])) -archipelago.__original_init__ = archipelago.__init__ -archipelago.__init__ = _generic_archi_ctor +def _maptask_target(a, p): + try: + return a.evolve(p) + except BaseException as e: + return e -def _archipelago_draw(self, layout = 'spring', n_color = 'fitness', n_size = 15, n_alpha = 0.5, e_alpha = 0.1, e_arrows=False, scale_by_degree = False, cmap = 'default'): - """ - Draw a visualization of the archipelago using networkx. - - USAGE: pos = archipelago.draw(layout = 'spring', color = 'fitness', n_size = 15, scale_by_degree = False, n_alpha = 0.5, e_alpha = 0.1, cmap = 'default', e_arrows=False) - - * layout: Network layout. Can be 'spring' or 'circular' or a list of values pos returned - by a previous call of the method (so that positions of the islands can be kept fixed. - * n_color = Defines the color code for the nodes. Can be one of 'fitness', 'links', ... or the standard matplotlib 'blue' .. etc. - * n_size: The size of nodes. Becomes scaling factor when scale_by_degree=True. - * n_alpha: Transparency of nodes. Takes value between 0 and 1. - * e_arrows: Plots arrows on the edges for directed graphs - * e_elpha: Transparency of edges. Takes value between 0 and 1. - * scale_by_degree: When True, nodes will be sized proportional to their degree. - * cmap: color map. one in matplotlib.pyplot.cm - """ - try: - import networkx as nx - except ImportError: - raise ImportError('Could not import the networkx module.') - try: - import matplotlib.pyplot as pl - except ImportError: - raise ImportError('Could not improt the MatPlotLib module.') - - #We set the graph in networkx - t = self.topology - G = t.to_networkx() - - #We scale the node sizes - node_sizes = range(nx.number_of_nodes(G)) - for i in range(nx.number_of_nodes(G)): - if scale_by_degree: - node_sizes[i] = nx.degree(G,i)*n_size - else: - node_sizes[i] = n_size - - #We compute the layout - if layout == 'spring': - pos = nx.spring_layout(G) - elif layout == "circular": - pos = nx.circular_layout(G) - else: - pos = layout - - #We compute the color_code - if n_color == 'fitness': - node_colors=[-isl.population.champion.f[0] for isl in self] - m = min(node_colors) - M = max(node_colors) - elif n_color == 'links': - m = min(node_colors) - M = max(node_colors) - node_colors=[t.get_num_adjacent_vertices(i) for i in range(len(self))] - elif n_color == 'rank': - vec = [-isl.population.champion.f[0] for isl in self] - node_colors=sorted(range(len(vec)), key=vec.__getitem__) - M = max(node_colors) - m= min(node_colors) - - else: - node_colors=n_color - m=0; - M=0; - - if not m==M: - node_colors=[(node_colors[i] - float(m))/(M-m) for i in range(len(self))] - - #And we draw the archipelago ..... - pl.figure() - if cmap == 'default': - cmap = pl.cm.Reds_r - nx.draw_networkx_nodes(G,pos,nodelist=range(len(self)), node_color=node_colors, cmap=cmap, node_size=node_sizes,alpha=n_alpha) - nx.draw_networkx_edges(G,pos,alpha=e_alpha,arrows=e_arrows) - pl.axis('off') - pl.show() - return pos -archipelago.draw = _archipelago_draw +class ipy_island(base_island): -def _pop_plot_pareto_fronts(pop, rgb=(0,0,0), comp = [0,1], symbol = 'o', size = 6): - """ - Plots the population pareto front in a 2-D graph + """Parallel IPython island. + + This island will launch evolutions using IPython's MapTask interface. 
The evolution will be dispatched
+    to IPython engines that, depending on the configuration of IPython/ipcluster, can reside either on the
+    local machine or on other remote machines.
+
+    See: http://ipython.scipy.org/doc/stable/html/parallel/index.html
+
+    """
+    # NOTE: when using an IPython island, on quitting IPython there might be a warning message
+    # reporting an exception being ignored. This seems to be a problem in the foolscap library:
+    # http://foolscap.lothar.com/trac/ticket/147
+    # Hopefully it will be fixed in the next versions of the library.
+    __init__ = _generic_island_ctor
+
+    def _perform_evolution(self, algo, pop):
+        try:
+            from IPython.kernel.client import TaskClient, MapTask
+            # Create task client.
+            tc = TaskClient()
+            # Create the task.
+            mt = MapTask(_maptask_target, args=(algo, pop))
+            # Run the task.
+            task_id = tc.run(mt)
+            # Get retval.
+            retval = tc.get_task_result(task_id, block=True)
+            if isinstance(retval, BaseException):
+                raise retval
+            return retval
+        except BaseException as e:
+            print('Exception caught during evolution:')
+            print(e)
+            raise RuntimeError()
+
+    def get_name(self):
+        return "Parallel IPython island"
+
+
+def island(*args, **kwargs):
+    return _generic_island_ctor(None, *args, **kwargs)
+
+island.__doc__ = '\n'.join(['Island factory function.\n\nThis function will return an instance of an island object\nbuilt according to the following rule: ' +
+                            'if the arguments include\neither a pythonic problem or a pythonic algorithm, then an instance\nof :class:`py_island` will be returned; ' +
+                            'otherwise, an instance of\n:class:`local_island` will be returned.'] + [s.replace('\t', '') for s in _generic_island_ctor.__doc__.split('\n')])
+# The following is necessary for Python 2: s remains in the workspace and will cause the error:
+# AttributeError: 'module' object has no attribute 's'
+# In Python 3, however, s is no longer left in the workspace, so an
+# unconditional del s would itself raise an exception!
+if 's' in globals():
+    del s

-    USAGE: pop.plot_pareto_front(comp = [0,1], rgb=(0,1,0))

-    * comp: components of the fitness function to plot in the 2-D window
-    * rgb: specify the color of the 1st front (use strong colors here)
-    * symbol: marker for the individual
-    * size: size of the markersymbol
-    """
-    from numpy import linspace
-    import matplotlib.pyplot as plt

+def _get_island_list():
+    from PyGMO import core
+    names = [n for n in dir(core) if not n.startswith(
+        '_') and not n.startswith('base') and n.endswith('_island')]
+    try:
+        from IPython.kernel.client import TaskClient, MapTask
+    except ImportError:
+        names = [n for n in names if n != 'ipy_island']
+    return [core.__dict__[n] for n in names]
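For reference, a brief usage sketch of the island factory defined above. This is a hedged example: the algorithm/problem choices are illustrative and assume a standard PyGMO build.

    from PyGMO import island, algorithm, problem

    # A C++ problem and algorithm: the factory returns a local_island.
    isl1 = island(algorithm.de(gen=100), problem.schwefel(10), 20)

    # A pythonic problem: the factory returns a py_island instead, so the
    # Python objective function is evaluated outside the main interpreter.
    isl2 = island(algorithm.de(gen=100), problem.py_example(), 20)
    isl2.evolve(1)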
+
+
+def _generic_archi_ctor(self, *args, **kwargs):
+    """
+    Unnamed arguments (optional):
+
+    #. algorithm
+    #. problem
+    #. number of islands
+    #. number of individuals in the population
+
+    Keyword arguments:
+
+    * *topology* -- migration topology (defaults to unconnected)
+    * *distribution_type* -- distribution_type (defaults to distribution_type.point_to_point)
+    * *migration_direction* -- migration_direction (defaults to migration_direction.destination)
+    """
+
+    from PyGMO import topology, algorithm, problem
+    from difflib import get_close_matches
+
+    if not((len(args) == 4) or (len(args) == 0)):
+        raise ValueError(
+            "Unnamed arguments list, when present, must be of length 4, but %d elements were found instead" %
+            (len(args),))
+
+    # Append everything in the same list of constructor arguments
+    ctor_args = []
+    for i in args:
+        ctor_args.append(i)
+
+    # Pop all known keywords out of kwargs and add a default value if not
+    # provided
+    # unconnected is default
+    ctor_args.append(kwargs.pop('topology', topology.unconnected()))
+    # point-to-point is default
+    ctor_args.append(
+        kwargs.pop('distribution_type', distribution_type.point_to_point))
+    # destination is default
+    ctor_args.append(
+        kwargs.pop('migration_direction', migration_direction.destination))
+
+    # Check for unknown keywords
+    kwlist = ['topology', 'distribution_type', 'migration_direction']
+    if kwargs:
+        s = "The following unknown keyword argument was passed to the constructor: "
+        for kw in kwargs:
+            s += kw
+            spam = get_close_matches(kw, kwlist)
+            if spam:
+                s += " (Did you mean %s?), " % spam[0]
+            else:
+                s += ", "
+
+        raise ValueError(s[:-2])
+
+    # Constructs an empty archipelago with no islands using the C++ constructor
+    self.__original_init__(*ctor_args[-3:])
+
+    # We now push back the correct island type if required
+    if (len(args)) == 4:
+        if not isinstance(args[0], algorithm._base):
+            raise TypeError("The first unnamed argument must be an algorithm")
+        if not (isinstance(args[1], problem._base) or isinstance(args[1], problem._base_stochastic)):
+            raise TypeError("The second unnamed argument must be a problem")
+        if not isinstance(args[2], int):
+            raise TypeError(
+                "The third unnamed argument must be an integer (i.e. number of islands)")
+        if not isinstance(args[3], int):
+            raise TypeError(
+                "The fourth unnamed argument must be an integer (i.e. population size)")
+        for n in range(args[2]):
+            self.push_back(island(args[0], args[1], args[3]))

-    if len(comp) !=2:
-        raise ValueError('Invalid components of the objective function selected for plot')

+archipelago.__original_init__ = archipelago.__init__
+archipelago.__init__ = _generic_archi_ctor

-    p_dim = pop.problem.f_dimension

-    if p_dim == 1:
-        raise ValueError('Pareto fronts of a 1-dimensional problem cannot be plotted')
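A hedged construction sketch for the archipelago constructor just defined. Names such as problem.rosenbrock and topology.ring are illustrative of the standard PyGMO API, not mandated by this patch.

    from PyGMO import archipelago, algorithm, problem, topology

    # algorithm, problem, number of islands, individuals per island,
    # plus the keyword arguments handled above.
    archi = archipelago(algorithm.de(gen=500), problem.rosenbrock(10),
                        8, 20, topology=topology.ring())
    archi.evolve(10)
    archi.join()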
+def _archipelago_draw(
+        self,
+        layout='spring',
+        n_color='fitness',
+        n_size=15,
+        n_alpha=0.5,
+        e_alpha=0.1,
+        e_arrows=False,
+        scale_by_degree=False,
+        cmap='default'):
+    """
+    Draw a visualization of the archipelago using networkx.
+
+    USAGE: pos = archipelago.draw(layout = 'spring', n_color = 'fitness', n_size = 15, scale_by_degree = False, n_alpha = 0.5, e_alpha = 0.1, cmap = 'default', e_arrows=False)
+
+    * layout: Network layout. Can be 'spring' or 'circular' or a list of values pos returned
+      by a previous call of the method (so that positions of the islands can be kept fixed).
+    * n_color: Defines the color code for the nodes. Can be one of 'fitness', 'links', ... or the standard matplotlib 'blue' .. etc.
+    * n_size: The size of nodes. Becomes scaling factor when scale_by_degree=True.
+    * n_alpha: Transparency of nodes. Takes value between 0 and 1.
+    * e_arrows: Plots arrows on the edges for directed graphs
+    * e_alpha: Transparency of edges. Takes value between 0 and 1.
+    * scale_by_degree: When True, nodes will be sized proportional to their degree.
+    * cmap: color map. one in matplotlib.pyplot.cm
+    """
+    try:
+        import networkx as nx
+    except ImportError:
+        raise ImportError('Could not import the networkx module.')
+    try:
+        import matplotlib.pyplot as pl
+    except ImportError:
+        raise ImportError('Could not import the matplotlib module.')
+
+    # We set the graph in networkx
+    t = self.topology
+    G = t.to_networkx()
+
+    # We scale the node sizes
+    node_sizes = list(range(nx.number_of_nodes(G)))
+    for i in range(nx.number_of_nodes(G)):
+        if scale_by_degree:
+            node_sizes[i] = nx.degree(G, i) * n_size
+        else:
+            node_sizes[i] = n_size
+
+    # We compute the layout
+    if layout == 'spring':
+        pos = nx.spring_layout(G)
+    elif layout == "circular":
+        pos = nx.circular_layout(G)
+    else:
+        pos = layout
+
+    # We compute the color_code
+    if n_color == 'fitness':
+        node_colors = [-isl.population.champion.f[0] for isl in self]
+        m = min(node_colors)
+        M = max(node_colors)
+    elif n_color == 'links':
+        node_colors = [
+            t.get_num_adjacent_vertices(i) for i in range(len(self))]
+        m = min(node_colors)
+        M = max(node_colors)
+    elif n_color == 'rank':
+        vec = [-isl.population.champion.f[0] for isl in self]
+        node_colors = sorted(list(range(len(vec))), key=vec.__getitem__)
+        M = max(node_colors)
+        m = min(node_colors)
+
+    else:
+        node_colors = n_color
+        m = 0
+        M = 0
+
+    if not m == M:
+        node_colors = [(node_colors[i] - float(m)) / (M - m)
+                       for i in range(len(self))]
+
+    # And we draw the archipelago .....
+    pl.figure()
+    if cmap == 'default':
+        cmap = pl.cm.Reds_r
+    nx.draw_networkx_nodes(
+        G,
+        pos,
+        nodelist=list(range(len(self))),
+        node_color=node_colors,
+        cmap=cmap,
+        node_size=node_sizes,
+        alpha=n_alpha)
+    nx.draw_networkx_edges(G, pos, alpha=e_alpha, arrows=e_arrows)
+    pl.axis('off')
+    pl.show()
+    return pos
+archipelago.draw = _archipelago_draw

-    if not all([c in range(0, p_dim) for c in comp]):
-        raise ValueError('You need to select valid components of the objective function')
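And a minimal sketch of the patched draw method (requires networkx and matplotlib; archi is the illustrative archipelago from the previous sketch):

    pos = archi.draw(layout='spring', n_color='rank', n_size=30)
    # Feeding the returned positions back keeps the island layout fixed
    # across successive calls.
    archi.draw(layout=pos, n_color='fitness')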
+def _pop_ctor(self, prob_or_pop, n_individuals=0, seed=None):
+    """
+    Constructs a population.
+
+    A population can be constructed in two ways, specified by prob_or_pop. If
+    prob_or_pop is a population (see USAGE 2 below), the other two arguments
+    are ignored, and the population is constructed by performing a deep-copy of
+    the provided population.
+
+    USAGE 1: pop = population(problem.zdt(), n_individuals=100, seed=1234)
+
+    * prob_or_pop: problem to be associated with the population
+    * n_individuals: number of individuals in the population
+    * seed: seed used to randomly initialize the individuals
+
+    USAGE 2:
+    from PyGMO import *
+    pop1 = population(problem.schwefel(50), 10) #population with 10 individuals
+    pop2 = population(pop1) # pop2 is a copy of pop1
+    """
+
+    arg_list = []
+    arg_list.append(prob_or_pop)
+    # For construction by copying, ignore the rest of the arguments (could be
+    # the default kwargs).
+    if not isinstance(prob_or_pop, population):
+        arg_list.append(n_individuals)
+        if seed is not None:
+            arg_list.append(seed)
+    return self._original_init(*arg_list)
+population._original_init = population.__init__
+population.__init__ = _pop_ctor
+
+
+def _pop_plot_pareto_fronts(
+        pop,
+        rgb=(0, 0, 0),
+        comp=[0, 1],
+        symbol='o',
+        size=6,
+        fronts=[]):
+    """
+    Plots the population pareto front in a 2-D graph
+
+    USAGE: pop.plot_pareto_front(comp = [0,1], rgb=(0,1,0))
+
+    * comp: components of the fitness function to plot in the 2-D window
+    * rgb: specify the color of the 1st front (use strong colors here)
+    * symbol: marker for the individual
+    * size: size of the marker symbol
+    * fronts: list of fronts to be plotted (use [0] to only show the first)
+    """
+    from numpy import linspace
+    import matplotlib.pyplot as plt
+
+    if len(comp) != 2:
+        raise ValueError(
+            'Invalid components of the objective function selected for plot')
+
+    p_dim = pop.problem.f_dimension
+
+    if p_dim == 1:
+        raise ValueError(
+            'Pareto fronts of a 1-dimensional problem cannot be plotted')
+
+    if not all([c in range(0, p_dim) for c in comp]):
+        raise ValueError(
+            'You need to select valid components of the objective function')
+
+    p_list = pop.compute_pareto_fronts()
+    if (len(fronts) > 0):
+        n = len(p_list)
+        consistent = [d < n for d in fronts]
+        if consistent.count(False) > 0:
+            raise ValueError(
+                'Check your fronts list: it refers to more fronts than the population has')
+        p_list = [p_list[idx] for idx in fronts]
+
+    cl = list(zip(linspace(0.9 if rgb[0] else 0.1, 0.9, len(p_list)),
+                  linspace(0.9 if rgb[1] else 0.1, 0.9, len(p_list)),
+                  linspace(0.9 if rgb[2] else 0.1, 0.9, len(p_list))))
+
+    for id_f, f in enumerate(p_list):
+        for ind in f:
+            ax = plt.plot([pop[ind].cur_f[comp[0]]],
+                          [pop[ind].cur_f[comp[1]]],
+                          symbol,
+                          color=cl[id_f],
+                          markersize=size)
+        x = [pop[ind].cur_f[comp[0]] for ind in f]
+        y = [pop[ind].cur_f[comp[1]] for ind in f]
+        tmp = [(a, b) for a, b in zip(x, y)]
+        tmp = sorted(tmp, key=lambda k: k[0])
+        plt.step([c[0] for c in tmp], [c[1] for c in tmp],
+                 color=cl[id_f], where='post')
+    return ax

-    p_list = pop.compute_pareto_fronts()
-    cl = zip(linspace(0.9 if rgb[0] else 0.1,0.9, len(p_list)),
-        linspace(0.9 if rgb[1] else 0.1,0.9, len(p_list)),
-        linspace(0.9 if rgb[2] else 0.1,0.9, len(p_list)))

+population.plot_pareto_fronts = _pop_plot_pareto_fronts
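For illustration, a hedged sketch of the reworked plotting helper and its new fronts argument (assumes a multi-objective ZDT problem from PyGMO; parameter values are arbitrary):

    from PyGMO import population, problem
    import matplotlib.pyplot as plt

    pop = population(problem.zdt(1, 30), 100)
    # Plot only the first (non-dominated) front, in green.
    pop.plot_pareto_fronts(rgb=(0, 1, 0), fronts=[0])
    plt.show()  # unlike the old helper, the new one does not call show()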
-    for id_f,f in enumerate(p_list):
-        for ind in f:
-            plt.plot([pop[ind].best_f[comp[0]]],[pop[ind].best_f[comp[1]]], symbol, color=cl[id_f], markersize=size)
-        x = [pop[ind].best_f[comp[0]] for ind in f]
-        y = [pop[ind].best_f[comp[1]] for ind in f]
-        tmp = [(a,b) for a,b in zip(x,y)]
-        tmp = sorted(tmp, key = lambda k:k[0])
-        plt.step([c[0] for c in tmp], [c[1] for c in tmp],color=cl[id_f],where='post')
-    plt.show()
-population.plot_pareto_fronts = _pop_plot_pareto_fronts
-
-def _pop_race(self, n_winners, min_trials = 0, max_feval = 500,
-        delta=0.05, racers_idx = [], race_best=True, screen_output=False):
-    """
-    Races individuals in a population
-
-    USAGE: pop.race(n_winners, min_trials = 0, max_feval = 500, delta = 0.05, racers_idx = [], race_best=True, screen_output=False)
-
-    * n_winners: number of winners in the race
-    * min_trials: minimum amount of evaluations before an individual can stop racing
-    * max_feval: budget for objective function evaluation
-    * delta: Statistical test confidence
-    * racers_idx: indices of the individuals in pop to be raced
-    * race_best: when True winners are the best, otherwise winners are the worst
-    * screen_output: produces some screen output at each iteration of the race
-    """
-    arg_list=[]
-    arg_list.append(n_winners)
-    arg_list.append(min_trials)
-    arg_list.append(max_feval)
-    arg_list.append(delta)
-    arg_list.append(racers_idx)
-    arg_list.append(race_best)
-    arg_list.append(screen_output)
-    return self._orig_race(*arg_list)
+def _pop_race(self, n_winners, min_trials=0, max_feval=500,
+              delta=0.05, racers_idx=[], race_best=True, screen_output=False):
+    """
+    Races individuals in a population
+
+    USAGE: pop.race(n_winners, min_trials = 0, max_feval = 500, delta = 0.05, racers_idx = [], race_best=True, screen_output=False)
+
+    * n_winners: number of winners in the race
+    * min_trials: minimum amount of evaluations before an individual can stop racing
+    * max_feval: budget for objective function evaluation
+    * delta: Statistical test confidence
+    * racers_idx: indices of the individuals in pop to be raced
+    * race_best: when True winners are the best, otherwise winners are the worst
+    * screen_output: produces some screen output at each iteration of the race
+    """
+    arg_list = []
+    arg_list.append(n_winners)
+    arg_list.append(min_trials)
+    arg_list.append(max_feval)
+    arg_list.append(delta)
+    arg_list.append(racers_idx)
+    arg_list.append(race_best)
+    arg_list.append(screen_output)
+    return self._orig_race(*arg_list)
 population._orig_race = population.race
 population.race = _pop_race

+
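A usage sketch for the re-exposed race wrapper and the repair wrapper that follows (a hedged example: the noisy-problem parameters and the jde constructor argument are illustrative; pop.race returns the winner indices together with the number of objective evaluations spent, as used in the racing examples later in this patch):

    from PyGMO import population, problem, algorithm

    prob = problem.noisy(problem.ackley(10), trials=1, param_second=0.3)
    pop = population(prob, 20)
    # Race the population down to its 3 statistically best individuals.
    winners, fevals = pop.race(3, min_trials=5, max_feval=1000, delta=0.05)
    # Repair an individual in place with an optimizer that can handle a
    # population of size 1.
    pop.repair(winners[0], repair_algorithm=algorithm.jde(20))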
 def _pop_repair(self, idx, repair_algorithm):
-    """
-    Repairs the individual at the given position
-
-    USAGE: pop.repair(idx, repair_algorithm = _algorithm.jde())
-
-    * idx: index of the individual to repair
-    repair_algorithm: optimizer to use as 'repairing' algorithm. It should be able to deal with population of size 1.
-    """
-    arg_list=[]
-    arg_list.append(idx)
-    arg_list.append(repair_algorithm)
-    return self._orig_repair(*arg_list)
+    """
+    Repairs the individual at the given position
+
+    USAGE: pop.repair(idx, repair_algorithm = _algorithm.jde())
+
+    * idx: index of the individual to repair
+    * repair_algorithm: optimizer to use as 'repairing' algorithm. It should be able to deal with a population of size 1.
+    """
+    arg_list = []
+    arg_list.append(idx)
+    arg_list.append(repair_algorithm)
+    return self._orig_repair(*arg_list)
 population._orig_repair = population.repair
 population.repair = _pop_repair
diff --git a/PyGMO/core/core.cpp b/PyGMO/core/core.cpp
index ee3df2a6..d4daf6b3 100644
--- a/PyGMO/core/core.cpp
+++ b/PyGMO/core/core.cpp
@@ -92,17 +92,17 @@ static inline void set_##name(type1 &arg1, const type2 &arg2) \
 	arg1.name = arg2; \
 }
 
-TRIVIAL_GETTER_SETTER(population::individual_type,decision_vector,cur_x);
-TRIVIAL_GETTER_SETTER(population::individual_type,decision_vector,cur_v);
-TRIVIAL_GETTER_SETTER(population::individual_type,fitness_vector,cur_f);
-TRIVIAL_GETTER_SETTER(population::individual_type,constraint_vector,cur_c);
-TRIVIAL_GETTER_SETTER(population::individual_type,decision_vector,best_x);
-TRIVIAL_GETTER_SETTER(population::individual_type,fitness_vector,best_f);
-TRIVIAL_GETTER_SETTER(population::individual_type,constraint_vector,best_c);
-
-TRIVIAL_GETTER_SETTER(population::champion_type,decision_vector,x);
-TRIVIAL_GETTER_SETTER(population::champion_type,fitness_vector,f);
-TRIVIAL_GETTER_SETTER(population::champion_type,constraint_vector,c);
+TRIVIAL_GETTER_SETTER(population::individual_type,decision_vector,cur_x)
+TRIVIAL_GETTER_SETTER(population::individual_type,decision_vector,cur_v)
+TRIVIAL_GETTER_SETTER(population::individual_type,fitness_vector,cur_f)
+TRIVIAL_GETTER_SETTER(population::individual_type,constraint_vector,cur_c)
+TRIVIAL_GETTER_SETTER(population::individual_type,decision_vector,best_x)
+TRIVIAL_GETTER_SETTER(population::individual_type,fitness_vector,best_f)
+TRIVIAL_GETTER_SETTER(population::individual_type,constraint_vector,best_c)
+
+TRIVIAL_GETTER_SETTER(population::champion_type,decision_vector,x)
+TRIVIAL_GETTER_SETTER(population::champion_type,fitness_vector,f)
+TRIVIAL_GETTER_SETTER(population::champion_type,constraint_vector,c)
 
 // Wrappers to make functions taking size_type as input take integers instead, with safety checks. 
inline static base_island_ptr archipelago_get_island(const archipelago &a, int n) @@ -257,7 +257,6 @@ BOOST_PYTHON_MODULE(_core) common_module_init(); typedef boost::array array2D; - typedef std::pair, int> new_pair; //Register std converters to lists if not already registered by some other module REGISTER_CONVERTER(array2D,fixed_size_policy); REGISTER_CONVERTER(std::vector, variable_capacity_policy); diff --git a/PyGMO/core/python_base_island.h b/PyGMO/core/python_base_island.h index e97ebd6a..8f216caf 100644 --- a/PyGMO/core/python_base_island.h +++ b/PyGMO/core/python_base_island.h @@ -261,6 +261,6 @@ inline void load_construct_data(Archive &, pagmo::python_base_island *isl, const }} //namespaces -BOOST_CLASS_EXPORT(pagmo::python_base_island); +BOOST_CLASS_EXPORT(pagmo::python_base_island) #endif diff --git a/PyGMO/core/python_island.h b/PyGMO/core/python_island.h index 53e6b863..98aec510 100644 --- a/PyGMO/core/python_island.h +++ b/PyGMO/core/python_island.h @@ -174,6 +174,6 @@ inline void load_construct_data(Archive &, pagmo::python_island *isl, const unsi }} //namespaces -BOOST_CLASS_EXPORT(pagmo::python_island); +BOOST_CLASS_EXPORT(pagmo::python_island) #endif diff --git a/PyGMO/examples/benchmark_racing.py b/PyGMO/examples/benchmark_racing.py index 2edbf41f..050115f5 100644 --- a/PyGMO/examples/benchmark_racing.py +++ b/PyGMO/examples/benchmark_racing.py @@ -8,11 +8,13 @@ stochastic_type = 'ROBUST' base_problem = problem.ackley(10) + class post_eval: """ Obtain the post-evaluated fitness via repeated averaing over different seeds. """ + def __init__(self, post_eval_prob, post_eval_n=500, seed=5): self.post_eval_prob = post_eval_prob self.post_eval_n = post_eval_n @@ -21,20 +23,38 @@ def __init__(self, post_eval_prob, post_eval_n=500, seed=5): def objfun(self, x): post_f = 0 np.random.seed(self.seed) - for i in xrange(self.post_eval_n): + for i in range(self.post_eval_n): self.post_eval_prob.seed = np.random.randint(1000000) - post_f += self.post_eval_prob.objfun(x)[0] / float(self.post_eval_n) + post_f += self.post_eval_prob.objfun(x)[0] / \ + float(self.post_eval_n) return (post_f,) -def start_experiment(num_trials=20, pop_size=40, fevals_max=100000, nr_eval_per_x=40, noise_level=0.05, seed=123): + +def start_experiment( + num_trials=20, + pop_size=40, + fevals_max=100000, + nr_eval_per_x=40, + noise_level=0.05, + seed=123): # 1. 
Set up the problem if(stochastic_type == 'NOISY'): - prob_single_eval = problem.noisy(base_problem, trials = 1, param_second = noise_level, noise_type = problem.noisy.noise_distribution.UNIFORM) - prob_regular = problem.noisy(base_problem, trials = nr_eval_per_x, param_second = noise_level, noise_type = problem.noisy.noise_distribution.UNIFORM) + prob_single_eval = problem.noisy( + base_problem, + trials=1, + param_second=noise_level, + noise_type=problem.noisy.noise_distribution.UNIFORM) + prob_regular = problem.noisy( + base_problem, + trials=nr_eval_per_x, + param_second=noise_level, + noise_type=problem.noisy.noise_distribution.UNIFORM) else: - prob_single_eval = problem.robust(base_problem, trials = 1, rho = noise_level) - prob_regular = problem.robust(base_problem, trials = nr_eval_per_x, rho = noise_level) + prob_single_eval = problem.robust( + base_problem, trials=1, rho=noise_level) + prob_regular = problem.robust( + base_problem, trials=nr_eval_per_x, rho=noise_level) #prob_large_trials = problem.robust(prob_orig, trials = 500, rho = noise_level) #prob_post_eval = post_eval(prob_large_trials, 5) @@ -54,57 +74,74 @@ def start_experiment(num_trials=20, pop_size=40, fevals_max=100000, nr_eval_per_ # evaluations. Ignoring the cost of initialization here. # NOTE: No need to scale down if both algo has the same version of problem if SAME_PROB: - gen_budget = fevals_max/(2*pop_size) + gen_budget = fevals_max / (2 * pop_size) else: - gen_budget = fevals_max/(2*pop_size*nr_eval_per_x) - print 'Non-racing pso gen will evolve for %d generations' % gen_budget - algo_psogen = algorithm.pso_gen(gen_budget,0.7298,2.05,2.05,0.05,5,2,4) - + gen_budget = fevals_max / (2 * pop_size * nr_eval_per_x) + print('Non-racing pso gen will evolve for %d generations' % gen_budget) + algo_psogen = algorithm.pso_gen( + gen_budget, 0.7298, 2.05, 2.05, 0.05, 5, 2, 4) + # 2B. Set up pso_gen algorithm with racing: # Setting gen number to be an arbitrarily large number, let fevals # decide when to terminate. nr_eval_per_x_racing = nr_eval_per_x - algo_psogen_racing = algorithm.pso_gen_racing(1000000,0.7298,2.05,2.05,0.05,5,2,4,nr_eval_per_x_racing,fevals_max) + algo_psogen_racing = algorithm.pso_gen_racing( + 1000000, + 0.7298, + 2.05, + 2.05, + 0.05, + 5, + 2, + 4, + nr_eval_per_x_racing, + fevals_max) # TODO: Use below to check the sanity of racing in factoring out the effect of exceeded fevals # algo_with_racing = algorithm.pso_gen_racing(gen_budget,0.7298,2.05,2.05,0.05,5,2,4,nr_eval_per_x_racing,999999999) # 3. 
Run both algorithms and record their performance if SAME_PROB: - algo_prob_pairs = [(algo_psogen, prob_regular), (algo_psogen_racing, prob_regular)] + algo_prob_pairs = [ + (algo_psogen, prob_regular), (algo_psogen_racing, prob_regular)] else: - algo_prob_pairs = [(algo_psogen, prob_regular), (algo_psogen_racing, prob_single_eval)] + algo_prob_pairs = [ + (algo_psogen, + prob_regular), + (algo_psogen_racing, + prob_single_eval)] post_evaluated_fitnesses = [] np.random.seed(seed) for i in range(num_trials): - print '::: Trial #%d :::' % i + print('::: Trial #%d :::' % i) results = [] seed += np.random.randint(100000) - for algo, prob in algo_prob_pairs: + for algo, prob in algo_prob_pairs: algo.reset_rngs(seed) - # Seed used to ensure both algorithm evolves an identical population + # Seed used to ensure both algorithm evolves an identical + # population pop = population(prob, pop_size, seed) pop = algo.evolve(pop) #winner_idx = pop.race(1)[0][0]; - #print "race winner", winner_idx, "vs champion idx", pop.get_best_idx() + #print("race winner", winner_idx, "vs champion idx", pop.get_best_idx()) #champion_true_fitness = prob_orig.objfun(pop[winner_idx].cur_x) champion_true_fitness = prob_post_eval.objfun(pop.champion.x)[0] - #print 'Final champion =', champion_true_fitness + #print('Final champion =', champion_true_fitness) results.append(champion_true_fitness) - print results + print(results) post_evaluated_fitnesses.append(results) - post_evaluated_fitnesses = zip(*post_evaluated_fitnesses) + post_evaluated_fitnesses = list(zip(*post_evaluated_fitnesses)) averaged_no_racing = np.mean(post_evaluated_fitnesses[0]) averaged_racing = np.mean(post_evaluated_fitnesses[1]) - print '----------------------------------------------' - print 'Final averaged actual fitness over %d trials:' % num_trials - print 'pso_gen without racing: %f' % averaged_no_racing - print 'pso_gen with racing: %f' % averaged_racing - print '----------------------------------------------' + print('----------------------------------------------') + print('Final averaged actual fitness over %d trials:' % num_trials) + print('pso_gen without racing: %f' % averaged_no_racing) + print('pso_gen with racing: %f' % averaged_racing) + print('----------------------------------------------') return (averaged_no_racing, averaged_racing) @@ -113,7 +150,7 @@ def vary_nr_eval_per_x(default_params): pars = copy.deepcopy(default_params) - param_list = range(3,20,2) + param_list = list(range(3, 20, 2)) f_no_racing_list = [] f_racing_list = [] for n in param_list: @@ -129,14 +166,16 @@ def vary_nr_eval_per_x(default_params): plt.xlabel('nr_eval_per_x') plt.ylabel('Post-evaluated fitness') prob_stat = '%s-%s' % (stochastic_type, base_problem.get_name()) - plt.title('%s\nPSO: With/without racing (fevals=%d) (%d trials)' % (prob_stat, pars['fevals_max'], pars['num_trials'])) + plt.title('%s\nPSO: With/without racing (fevals=%d) (%d trials)' % + (prob_stat, pars['fevals_max'], pars['num_trials'])) #plt.savefig('%s-psogenracing-nr_eval_per_x.png' % prob_stat) + def vary_neighbourhood_size(default_params): pars = copy.deepcopy(default_params) - param_list = np.linspace(0.01,0.2,num=20) + param_list = np.linspace(0.01, 0.2, num=20) f_no_racing_list = [] f_racing_list = [] for p in param_list: @@ -148,18 +187,20 @@ def vary_neighbourhood_size(default_params): plt.figure() plt.plot(param_list, f_racing_list, '-o') plt.plot(param_list, f_no_racing_list, '-s') - plt.legend(['PSO racing', 'PSO without racing'],loc='best') + plt.legend(['PSO racing', 'PSO 
without racing'], loc='best') plt.xlabel('Robust\'s neighbourhood size') plt.ylabel('Post-evaluated fitness') prob_stat = '%s-%s' % (stochastic_type, base_problem.get_name()) - plt.title('%s\nPSO: With/without racing (fevals=%d) (%d trials)' % (prob_stat, pars['fevals_max'], pars['num_trials'])) + plt.title('%s\nPSO: With/without racing (fevals=%d) (%d trials)' % + (prob_stat, pars['fevals_max'], pars['num_trials'])) #plt.savefig('%s-psogenracing-robust_neighbourhood_small.png' % prob_stat) + def vary_fevals_budget(num_trials=20, nr_eval_per_x=10, nb_size=0.5): pars = copy.deepcopy(default_params) - param_list = range(10000,200000,20000) + param_list = list(range(10000, 200000, 20000)) f_no_racing_list = [] f_racing_list = [] for fevals_max in param_list: @@ -171,18 +212,25 @@ def vary_fevals_budget(num_trials=20, nr_eval_per_x=10, nb_size=0.5): plt.figure() plt.plot(param_list, f_racing_list, '-o') plt.plot(param_list, f_no_racing_list, '-s') - plt.legend(['PSO racing', 'PSO without racing'],loc='best') + plt.legend(['PSO racing', 'PSO without racing'], loc='best') plt.xlabel('Evaluation budget (# of fevals)') plt.ylabel('Post-evaluated fitness') prob_stat = '%s-%s' % (stochastic_type, base_problem.get_name()) - plt.title('%s\nPSO: With/without racing (neighbourhood size = %.2f) (%d trials)' % (prob_stat, pars['noise_level'], pars['num_trials'])) + plt.title( + '%s\nPSO: With/without racing (neighbourhood size = %.2f) (%d trials)' % + (prob_stat, pars['noise_level'], pars['num_trials'])) #plt.savefig('%s-psogenracing-robust_fevals.png' % prob_stat) if __name__ == '__main__': #start_experiment(num_trials=20, pop_size=20, nr_eval_per_x=20, fevals_max=200000) - default_params = dict(num_trials=10, pop_size=20, nr_eval_per_x=10, fevals_max=100000, noise_level=0.3) + default_params = dict( + num_trials=10, + pop_size=20, + nr_eval_per_x=10, + fevals_max=100000, + noise_level=0.3) - vary_nr_eval_per_x(default_params) + vary_nr_eval_per_x(default_params) vary_neighbourhood_size(default_params) - ##vary_fevals_budget(default_params) + # vary_fevals_budget(default_params) diff --git a/PyGMO/examples/racing.py b/PyGMO/examples/racing.py index 276bba89..dfbbce2a 100644 --- a/PyGMO/examples/racing.py +++ b/PyGMO/examples/racing.py @@ -4,152 +4,177 @@ import matplotlib.pyplot as plt import copy -def brute_force_average_f(pop_noisy, num_winner, eval_budget): - """ - Allocate evenly the evaluation budget to all the individuals. - The winners will be determined by the averaged objective values. - (Note: Only applicable to single objective, non-constrained problems) - """ - pop_n = len(pop_noisy) - f_mean = [0.0] * pop_n - cnt = [0] * pop_n - eval_total = 0 - - f_prev = [-999.0] * pop_n - pop_noisy_local = copy.deepcopy(pop_noisy) - - while eval_total < eval_budget: - cur_seed = random.randint(0, 100000000) - pop_noisy_local._problem_reference.seed = cur_seed - for (i, p) in enumerate(pop_noisy_local): - newest_f = pop_noisy_local.problem.objfun(p.cur_x)[0] - f_mean[i] = f_mean[i] + newest_f - cnt[i] = cnt[i] + 1 - eval_total = eval_total + 1 - if eval_total >= eval_budget: - break - - f_mean = [f_mean[i] / cnt[i] for i in range(0, pop_n)] - - winners = np.argsort(f_mean)[:num_winner] - - return winners - -def brute_force_average_rank(pop_noisy, num_winner, eval_budget): - """ - Allocate evenly the evaluation budget to all the individuals. - The winners will be determined by the averaged ranking. 
- """ - pop_n = len(pop_noisy) - rank_sum = [0.0] * pop_n - eval_total = 0 - - pop_noisy_local = copy.deepcopy(pop_noisy) - - while eval_total < eval_budget: - cur_seed = random.randint(0, 100000000) - pop_noisy_local._problem_reference.seed = cur_seed - for (i, p) in enumerate(pop_noisy_local): - pop_noisy_local.set_x(i, p.cur_x) - cur_winners = pop_noisy_local.get_best_idx(pop_n) - for (rank, ind_idx) in enumerate(cur_winners): - rank_sum[ind_idx] += rank - eval_total += pop_n - - winners = np.argsort(rank_sum)[:num_winner] - - return winners - -def get_capture_rates(prob_orig = problem.ackley(10), noise = 0.3, pop_n = 20, num_winner = 4, eval_budget = 200): - """ - Returns a list containing the capture rates of different methods. - - Capture rate: Percentage of matching with the ground truth ordering. - - Example: ground_truth = [1,3,5,7,9], winners = [1,2,3,4,5], capture - rate of winners = 3 / 5 = 0.6 - """ - - prob_noisy = problem.noisy(prob_orig, 1, 0, noise) - - pop_orig = population(prob_orig, pop_n) - pop_noisy = population(prob_noisy) - for p in pop_orig: - pop_noisy.push_back(p.cur_x) - rates = [] - - # Ground truth - winners_orig = pop_orig.get_best_idx(num_winner) - - # Results from different methods: - winners_racing, fevals = pop_noisy.race(num_winner, 0, eval_budget, 0.05, []) - capture_race = 100.0 * sum([int(p in winners_orig) for p in winners_racing]) / num_winner - rates.append(capture_race) - - winners_brute_force_rank = brute_force_average_rank(pop_noisy, num_winner, eval_budget) - capture_bf_rank = 100.0 * sum([int(p in winners_orig) for p in winners_brute_force_rank]) / num_winner - rates.append(capture_bf_rank) - - if(prob_noisy.f_dimension == 1 and prob_noisy.c_dimension == 0): - winners_brute_force_f = brute_force_average_f(pop_noisy, num_winner, eval_budget) - capture_bf_f = 100.0 * sum([int(p in winners_orig) for p in winners_brute_force_f]) / num_winner - rates.append(capture_bf_f) - - return rates +def brute_force_average_f(pop_noisy, num_winner, eval_budget): + """ + Allocate evenly the evaluation budget to all the individuals. + The winners will be determined by the averaged objective values. + (Note: Only applicable to single objective, non-constrained problems) + """ + pop_n = len(pop_noisy) + f_mean = [0.0] * pop_n + cnt = [0] * pop_n + eval_total = 0 -def get_rank_sum_errors(prob_orig = problem.ackley(10), noise = 0.3, pop_n = 20, num_winner = 4, eval_budget = 200): - """ - Returns a list containing the rank-sum error rates of different methods. + f_prev = [-999.0] * pop_n + pop_noisy_local = copy.deepcopy(pop_noisy) - Rank-sum error rates: The same metric used in the C++ tests. + while eval_total < eval_budget: + cur_seed = random.randint(0, 100000000) + pop_noisy_local._problem_reference.seed = cur_seed + for (i, p) in enumerate(pop_noisy_local): + newest_f = pop_noisy_local.problem.objfun(p.cur_x)[0] + f_mean[i] = f_mean[i] + newest_f + cnt[i] = cnt[i] + 1 + eval_total = eval_total + 1 + if eval_total >= eval_budget: + break - The error is defined as the difference between the true ranks sum and the - returned ranks sum. For example if race returns [1,3,5], then the error is - (1+3+5) - (0+1+2) = 6. The allowed error is the size of the returned list, - corresponding, to allow just losing the winner (i.e. 
[1,2,3] is still valid, but - [1,2,4] not) - """ + f_mean = [f_mean[i] / cnt[i] for i in range(0, pop_n)] - prob_noisy = problem.noisy(prob_orig, 1, 0, noise) + winners = np.argsort(f_mean)[:num_winner] - pop_orig = population(prob_orig, pop_n) + return winners - # Increase the level of difficulty - #algo = algorithm.pso(gen=50) - #pop_orig = algo.evolve(pop_orig) - # True ordering of the individuals - winners_orig = pop_orig.get_best_idx(pop_n) +def brute_force_average_rank(pop_noisy, num_winner, eval_budget): + """ + Allocate evenly the evaluation budget to all the individuals. + The winners will be determined by the averaged ranking. + """ + pop_n = len(pop_noisy) + rank_sum = [0.0] * pop_n + eval_total = 0 + + pop_noisy_local = copy.deepcopy(pop_noisy) + + while eval_total < eval_budget: + cur_seed = random.randint(0, 100000000) + pop_noisy_local._problem_reference.seed = cur_seed + for (i, p) in enumerate(pop_noisy_local): + pop_noisy_local.set_x(i, p.cur_x) + cur_winners = pop_noisy_local.get_best_idx(pop_n) + for (rank, ind_idx) in enumerate(cur_winners): + rank_sum[ind_idx] += rank + eval_total += pop_n + + winners = np.argsort(rank_sum)[:num_winner] + + return winners + + +def get_capture_rates( + prob_orig=problem.ackley(10), + noise=0.3, + pop_n=20, + num_winner=4, + eval_budget=200): + """ + Returns a list containing the capture rates of different methods. + + Capture rate: Percentage of matching with the ground truth ordering. + + Example: ground_truth = [1,3,5,7,9], winners = [1,2,3,4,5], capture + rate of winners = 3 / 5 = 0.6 + """ + + prob_noisy = problem.noisy(prob_orig, 1, 0, noise) + + pop_orig = population(prob_orig, pop_n) + pop_noisy = population(prob_noisy) + for p in pop_orig: + pop_noisy.push_back(p.cur_x) + + rates = [] + + # Ground truth + winners_orig = pop_orig.get_best_idx(num_winner) + + # Results from different methods: + winners_racing, fevals = pop_noisy.race( + num_winner, 0, eval_budget, 0.05, []) + capture_race = 100.0 * sum([int(p in winners_orig) + for p in winners_racing]) / num_winner + rates.append(capture_race) + + winners_brute_force_rank = brute_force_average_rank( + pop_noisy, num_winner, eval_budget) + capture_bf_rank = 100.0 * \ + sum([int(p in winners_orig) + for p in winners_brute_force_rank]) / num_winner + rates.append(capture_bf_rank) + + if(prob_noisy.f_dimension == 1 and prob_noisy.c_dimension == 0): + winners_brute_force_f = brute_force_average_f( + pop_noisy, num_winner, eval_budget) + capture_bf_f = 100.0 * \ + sum([int(p in winners_orig) + for p in winners_brute_force_f]) / num_winner + rates.append(capture_bf_f) + + return rates + + +def get_rank_sum_errors( + prob_orig=problem.ackley(10), + noise=0.3, + pop_n=20, + num_winner=4, + eval_budget=200): + """ + Returns a list containing the rank-sum error rates of different methods. + + Rank-sum error rates: The same metric used in the C++ tests. + + The error is defined as the difference between the true ranks sum and the + returned ranks sum. For example if race returns [1,3,5], then the error is + (1+3+5) - (0+1+2) = 6. The allowed error is the size of the returned list, + corresponding, to allow just losing the winner (i.e. 
[1,2,3] is still valid, but + [1,2,4] not) + """ + + prob_noisy = problem.noisy(prob_orig, 1, 0, noise) + + pop_orig = population(prob_orig, pop_n) + + # Increase the level of difficulty + #algo = algorithm.pso(gen=50) + #pop_orig = algo.evolve(pop_orig) + + # True ordering of the individuals + winners_orig = pop_orig.get_best_idx(pop_n) - # Individuals are already sorted by their true quality in pop_noisy - pop_noisy = population(prob_noisy) - for ind_idx in winners_orig: - pop_noisy.push_back(pop_orig[ind_idx].cur_x) +# Individuals are already sorted by their true quality in pop_noisy + pop_noisy = population(prob_noisy) + for ind_idx in winners_orig: + pop_noisy.push_back(pop_orig[ind_idx].cur_x) + +# In perfect case, rank sum should be sum([0,1,2,....,num_winner-1]) + ground_truth = ((num_winner - 1) + 1) * (num_winner - 1) / 2 - # In perfect case, rank sum should be sum([0,1,2,....,num_winner-1]) - ground_truth = ((num_winner-1)+1)*(num_winner-1) / 2 + # Worse possible error + # normalize_factor = (((pop_n-1)+1)*(pop_n-1) / 2 - ground_truth) - ground_truth - # Worse possible error - # normalize_factor = (((pop_n-1)+1)*(pop_n-1) / 2 - ground_truth) - ground_truth + errors = [] - errors = [] + # Results from different methods: + winners_racing, fevals = pop_noisy.race( + num_winner, 0, eval_budget, 0.05, []) + error_race = sum(winners_racing) - ground_truth + errors.append(error_race) - # Results from different methods: - winners_racing, fevals = pop_noisy.race(num_winner, 0, eval_budget, 0.05, []) - error_race = sum(winners_racing) - ground_truth - errors.append(error_race) + winners_brute_force_rank = brute_force_average_rank( + pop_noisy, num_winner, eval_budget) + error_bf_rank = sum(winners_brute_force_rank) - ground_truth + errors.append(error_bf_rank) - winners_brute_force_rank = brute_force_average_rank(pop_noisy, num_winner, eval_budget) - error_bf_rank = sum(winners_brute_force_rank) - ground_truth - errors.append(error_bf_rank) + if(prob_noisy.f_dimension == 1 and prob_noisy.c_dimension == 0): + winners_brute_force_f = brute_force_average_f( + pop_noisy, num_winner, eval_budget) + error_bf_f = sum(winners_brute_force_f) - ground_truth + errors.append(error_bf_f) - if(prob_noisy.f_dimension == 1 and prob_noisy.c_dimension == 0): - winners_brute_force_f = brute_force_average_f(pop_noisy, num_winner, eval_budget) - error_bf_f = sum(winners_brute_force_f) - ground_truth - errors.append(error_bf_f) - - return errors + return errors # Setting some common parameters for the experimentations num_trials = 200 @@ -161,102 +186,132 @@ def get_rank_sum_errors(prob_orig = problem.ackley(10), noise = 0.3, pop_n = 20, metric_name = 'Capture rate (%)' """ -metric_fn = get_rank_sum_errors; +metric_fn = get_rank_sum_errors metric_name = 'Rank-sum error' + def repeat_and_average(fn, *args): - s = [] - for i in range(num_trials): - s.append(fn(*args)) - s = np.array(s) - return np.mean(s,0) + s = [] + for i in range(num_trials): + s.append(fn(*args)) + s = np.array(s) + return np.mean(s, 0) + def run_varying_noise(prob_orig): - # --- Set-up A: Test with different noise levels --- - plt.close() - noise_levels = [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7] - - start_n = 100 - eval_budget = start_n * 5 - success_rates = []; - - for noise in noise_levels: - print 'Setup A: noise = %lf' % noise - averaged_rates = repeat_and_average(metric_fn, prob_orig, noise, start_n, - final_n, eval_budget) - success_rates.append(averaged_rates) - - success_rates = np.array(success_rates).T - - 
plt.ion()
-    plt.hold(True)
-    for (i, succ) in enumerate(success_rates):
-        plt.plot(noise_levels, succ)
-    plt.xlabel('Noise level (sigma)')
-    plt.ylabel(metric_name)
-    plt.title('%s: Varying noise levels\n# of inds: %d -> %d, eval. budget = %d' % (prob_orig.get_name(), start_n, final_n, eval_budget))
-    plt.legend(('Racing', 'Brute-force (averaged rank)', 'Brute-force (averaged f)'), loc = 'best')
-    plt.savefig('%s-racing-varying-noise' % prob_orig.get_name().replace(' ', ''), formant='png')
-    # --- End of set-up A ----
+    # --- Set-up A: Test with different noise levels ---
+    plt.close()
+    noise_levels = [
+        0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7]
+
+    start_n = 100
+    eval_budget = start_n * 5
+    success_rates = []
+
+    for noise in noise_levels:
+        print('Setup A: noise = %lf' % noise)
+        averaged_rates = repeat_and_average(
+            metric_fn,
+            prob_orig,
+            noise,
+            start_n,
+            final_n,
+            eval_budget)
+        success_rates.append(averaged_rates)
+
+    success_rates = np.array(success_rates).T
+
+    plt.ion()
+    plt.hold(True)
+    for (i, succ) in enumerate(success_rates):
+        plt.plot(noise_levels, succ)
+    plt.xlabel('Noise level (sigma)')
+    plt.ylabel(metric_name)
+    plt.title(
+        '%s: Varying noise levels\n# of inds: %d -> %d, eval. budget = %d' %
+        (prob_orig.get_name(), start_n, final_n, eval_budget))
+    plt.legend(
+        ('Racing',
+         'Brute-force (averaged rank)',
+         'Brute-force (averaged f)'),
+        loc='best')
+    plt.savefig('%s-racing-varying-noise' %
+                prob_orig.get_name().replace(' ', ''), format='png')
+    # --- End of set-up A ----
+

 def run_varying_initial_size(prob_orig):
-    # --- Set-up B: Test with different initial pop sizes ---
-    plt.close()
-    start_n_list = [20,40,60,80,100,120,140,160,180,200]
-    success_rates = [];
-
-    for start in start_n_list:
-        print 'Setup B: initial popsize = %d' % start
-        averaged_rates = repeat_and_average(metric_fn, prob_orig,
-                            default_noise, start, final_n,
-                            start * 5)
-        success_rates.append(averaged_rates)
-
-    success_rates = np.array(success_rates).T
-
-    plt.ion()
-    plt.hold(True)
-    for (i, succ) in enumerate(success_rates):
-        plt.plot(start_n_list, succ)
-    plt.xlabel('Initial population size')
-    plt.ylabel(metric_name)
-    plt.title('%s: Varying initial pop size\n# of winners: %d, eval. budget = popsize * 5' % (prob_orig.get_name(), final_n))
-    plt.legend(('Racing', 'Brute-force (averaged rank)', 'Brute-force (averaged f)'), loc = 'best')
-    plt.savefig('%s-racing-varying-initialpopsize' % prob_orig.get_name().replace(' ', ''), formant='png')
-    # --- End of set-up B ---
+    # --- Set-up B: Test with different initial pop sizes ---
+    plt.close()
+    start_n_list = [20, 40, 60, 80, 100, 120, 140, 160, 180, 200]
+    success_rates = []
+
+    for start in start_n_list:
+        print('Setup B: initial popsize = %d' % start)
+        averaged_rates = repeat_and_average(metric_fn, prob_orig,
+                                            default_noise, start, final_n,
+                                            start * 5)
+        success_rates.append(averaged_rates)
+
+    success_rates = np.array(success_rates).T
+
+    plt.ion()
+    plt.hold(True)
+    for (i, succ) in enumerate(success_rates):
+        plt.plot(start_n_list, succ)
+    plt.xlabel('Initial population size')
+    plt.ylabel(metric_name)
+    plt.title(
+        '%s: Varying initial pop size\n# of winners: %d, eval. budget = popsize * 5' %
+        (prob_orig.get_name(), final_n))
+    plt.legend(
+        ('Racing',
+         'Brute-force (averaged rank)',
+         'Brute-force (averaged f)'),
+        loc='best')
+    plt.savefig('%s-racing-varying-initialpopsize' %
+                prob_orig.get_name().replace(' ', ''), format='png')
+    # --- End of set-up B ---
+

 def run_varying_eval_budget(prob_orig):
-    # --- Set-up C: Test with different evaluation budget ---
-    plt.close()
-    eval_budget_list = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
-    start_n = 100
-    success_rates = [];
-
-    for eval_budget in eval_budget_list:
-        print 'Setup C: evaluation budget = %d' % eval_budget
-        averaged_rates = repeat_and_average(metric_fn, prob_orig,
-                            default_noise, start_n, final_n,
-                            eval_budget)
-        success_rates.append(averaged_rates)
-
-    success_rates = np.array(success_rates).T
-
-    plt.ion()
-    plt.hold(True)
-    for (i, succ) in enumerate(success_rates):
-        plt.plot(eval_budget_list, succ)
-    plt.xlabel('Allowed evaluation budget')
-    plt.ylabel(metric_name)
-    plt.title('%s: Varying eval. budget\n# of inds: %d -> %d' % (prob_orig.get_name(), start_n, final_n))
-    plt.legend(('Racing', 'Brute-force (averaged rank)', 'Brute-force (averaged f)'), loc = 'best')
-    plt.savefig('%s-racing-varying-budget' % prob_orig.get_name().replace(' ', ''), formant='png')
-    # --- End of set-up C ---
+    # --- Set-up C: Test with different evaluation budget ---
+    plt.close()
+    eval_budget_list = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
+    start_n = 100
+    success_rates = []
+
+    for eval_budget in eval_budget_list:
+        print('Setup C: evaluation budget = %d' % eval_budget)
+        averaged_rates = repeat_and_average(metric_fn, prob_orig,
+                                            default_noise, start_n, final_n,
+                                            eval_budget)
+        success_rates.append(averaged_rates)
+
+    success_rates = np.array(success_rates).T
+
+    plt.ion()
+    plt.hold(True)
+    for (i, succ) in enumerate(success_rates):
+        plt.plot(eval_budget_list, succ)
+    plt.xlabel('Allowed evaluation budget')
+    plt.ylabel(metric_name)
+    plt.title('%s: Varying eval. budget\n# of inds: %d -> %d' %
+              (prob_orig.get_name(), start_n, final_n))
+    plt.legend(
+        ('Racing',
+         'Brute-force (averaged rank)',
+         'Brute-force (averaged f)'),
+        loc='best')
+    plt.savefig('%s-racing-varying-budget' %
+                prob_orig.get_name().replace(' ', ''), format='png')
+    # --- End of set-up C ---

 if __name__ == '__main__':
-    prob_orig = problem.ackley(10)
-    #prob_orig = problem.cec2006(5)
-    #prob_orig = problem.zdt(1,10)

-    run_varying_noise(prob_orig)
-    run_varying_initial_size(prob_orig)
-    run_varying_eval_budget(prob_orig)
+    prob_orig = problem.ackley(10)
+    #prob_orig = problem.cec2006(5)
+    #prob_orig = problem.zdt(1,10)

+    run_varying_noise(prob_orig)
+    run_varying_initial_size(prob_orig)
+    run_varying_eval_budget(prob_orig)
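A hedged sketch of driving the helpers in this example directly, using the signatures defined above (the numeric settings are illustrative):

    from PyGMO import problem

    # Compare racing against the brute-force baselines on a noisy Ackley.
    rates = get_capture_rates(prob_orig=problem.ackley(10), noise=0.2,
                              pop_n=50, num_winner=5, eval_budget=250)
    print('capture rates (racing, avg-rank, avg-f):', rates)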
diff --git a/PyGMO/migration/__init__.py b/PyGMO/migration/__init__.py
index 2b4b7388..36001d27 100644
--- a/PyGMO/migration/__init__.py
+++ b/PyGMO/migration/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: iso-8859-1 -*-
-from _migration import *
+from PyGMO.migration._migration import *
 
 _base_r_policy = _migration._base_r_policy
 _base_s_policy = _migration._base_s_policy
diff --git a/PyGMO/problem/__init__.py b/PyGMO/problem/__init__.py
index 619424b7..c24d3321 100644
--- a/PyGMO/problem/__init__.py
+++ b/PyGMO/problem/__init__.py
@@ -1,693 +1,796 @@
 # -*- coding: iso-8859-1 -*-
-from _base import base
-from _base_stochastic import base_stochastic
-from _problem import *
-from _problem import _base
-from _problem import _base_stochastic
-from _example import py_example
-from _example import py_example_max
-from _example_stochastic import py_example_stochastic
-from _pl2pl import py_pl2pl
-from _mo import *
+from PyGMO.problem._base import base
+from PyGMO.problem._base_stochastic import base_stochastic
+from PyGMO.problem._problem import *
+from PyGMO.problem._problem import _base
+from PyGMO.problem._problem import _base_stochastic
+from PyGMO.problem._example import py_example
+from PyGMO.problem._example import py_example_max
+from PyGMO.problem._example_stochastic import py_example_stochastic
+from PyGMO.problem._pl2pl import py_pl2pl
+from PyGMO.problem._mo import *
 
 # If GTOP database support is active import interplanetary trajectory problems
 try:
-    from _gtop import *
+    from PyGMO.problem._gtop import *
 except ImportError:
-    pass
+    pass
 
 # If GSL support is active import mit_sphere
 try:
-    from _mit_spheres import visualize as _visualize
-    mit_spheres.visualize = _visualize
-    def _mit_spheres_ctor(self, sample_size = 10, n_hidden = 10, ode_prec = 1E-3, seed = 0, symmetric = False, simulation_time = 50.0, sides = [0.6,0.7,0.8]):
-        """
-        Construct a Neurocontroller Evolution problem that seeks to drive three point masses to form a triangle
-        This problem was used to design a contorller for the MIT SPHERES test bed on boear the ISS
-
-        USAGE: problem.mit_spheres(sample_size = 10, n_hidden = 10, ode_prec = 1E-3, seed = 0, symmetric = False, simulation_time = 50.0):
-
-        * sample_size: number of initial conditions the neurocontroller is tested from
-        * n_hidden: number of hidden for the feed-forward neural network
-        * ode_prec: relative numerical precision of neurons the ODE integrator
-        * seed: integer used as starting random seed to build the pseudorandom sequences used to generate the sample
-        * symmetric: when True activates a Neural Network having symmetric weights (i.e. purely homogeneuos agents)
-        * simulation_time: when True activates a Neural Network having symmetric weights (i.e. purely homogeneuos agents)
-        * sides: sides of the triangle
-
-        """
-
-        # We construct the arg list for the original constructor exposed by boost_python
-        arg_list=[]
-        arg_list.append(sample_size)
-        arg_list.append(n_hidden)
-        arg_list.append(ode_prec)
-        arg_list.append(seed)
-        arg_list.append(symmetric)
-        arg_list.append(simulation_time)
-        arg_list.append(sides)
-        self._orig_init(*arg_list)
-    mit_spheres._orig_init = mit_spheres.__init__
-    mit_spheres.__init__ = _mit_spheres_ctor
-
-    from PyGMO import __version__
-    __version__ = __version__ + "GTOP " + "GSL "
+    from PyGMO.problem._mit_spheres import visualize as _visualize
+    mit_spheres.visualize = _visualize
+
+    def _mit_spheres_ctor(
+            self,
+            sample_size=10,
+            n_hidden=10,
+            ode_prec=1E-3,
+            seed=0,
+            symmetric=False,
+            simulation_time=50.0,
+            sides=[0.6, 0.7, 0.8]):
+        """
+        Construct a Neurocontroller Evolution problem that seeks to drive three point masses to form a triangle.
+        This problem was used to design a controller for the MIT SPHERES test bed on board the ISS
+
+        USAGE: problem.mit_spheres(sample_size = 10, n_hidden = 10, ode_prec = 1E-3, seed = 0, symmetric = False, simulation_time = 50.0):
+
+        * sample_size: number of initial conditions the neurocontroller is tested from
+        * n_hidden: number of hidden neurons of the feed-forward neural network
+        * ode_prec: relative numerical precision of the ODE integrator
+        * seed: integer used as starting random seed to build the pseudorandom sequences used to generate the sample
+        * symmetric: when True activates a Neural Network having symmetric weights (i.e. purely homogeneous agents)
+        * simulation_time: duration of the simulation
+        * sides: sides of the triangle
+
+        """
+
+        # We construct the arg list for the original constructor exposed by
+        # boost_python
+        arg_list = []
+        arg_list.append(sample_size)
+        arg_list.append(n_hidden)
+        arg_list.append(ode_prec)
+        arg_list.append(seed)
+        arg_list.append(symmetric)
+        arg_list.append(simulation_time)
+        arg_list.append(sides)
+        self._orig_init(*arg_list)
+    mit_spheres._orig_init = mit_spheres.__init__
+    mit_spheres.__init__ = _mit_spheres_ctor
+
+    from PyGMO import __version__
+    __version__ = __version__ + "GTOP " + "GSL "
 except:
-    pass
+    pass
+
+# Creating the list of problems
+

-#Creating the list of problems
 def _get_problem_list():
-    from PyGMO import problem
-    return [problem.__dict__[n] for n in filter(lambda n: not n.startswith('_') and not n == 'base' and not n =="base_stochastic" and (issubclass(problem.__dict__[n],problem._base) or issubclass(problem.__dict__[n],problem._base_stochastic)),dir(problem))]
+    from PyGMO import problem
+    return [
+        problem.__dict__[n] for n in [
+            n for n in dir(problem) if not n.startswith('_') and not n == 'base' and not n == "base_stochastic" and (
+                issubclass(
+                    problem.__dict__[n],
+                    problem._base) or issubclass(
+                    problem.__dict__[n],
+                    problem._base_stochastic))]]
 
-# Redefining the constructors of all problems to obtain good documentation and allowing kwargs
-def _rastrigin_ctor(self,dim = 10):
-    """
-    Constructs a Rastrigin problem (Box-Constrained Continuous Single-Objective)
+# Redefining the constructors of all problems to obtain good documentation
+# and allowing kwargs
 
-    USAGE: problem.rastrigin(dim=10)
 
+def _rastrigin_ctor(self, dim=10):
+    """
+    Constructs a Rastrigin problem (Box-Constrained Continuous Single-Objective)
+
+    USAGE: problem.rastrigin(dim=10)
 
-    * dim: problem dimension
-    """
+    * dim: problem dimension
+    """
+
-    # We construct the 
arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + * dim: problem dimension + """ + + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) rastrigin._orig_init = rastrigin.__init__ rastrigin.__init__ = _rastrigin_ctor -def _rosenbrock_ctor(self,dim = 10): - """ - Constructs a Rosenbrock problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.rosenbrock(dim=10) +def _rosenbrock_ctor(self, dim=10): + """ + Constructs a Rosenbrock problem (Box-Constrained Continuous Single-Objective) + + USAGE: problem.rosenbrock(dim=10) - * dim: problem dimension - """ + * dim: problem dimension + """ - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) rosenbrock._orig_init = rosenbrock.__init__ rosenbrock.__init__ = _rosenbrock_ctor -def _ackley_ctor(self,dim = 10): - """ - Constructs a Ackley problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.ackley(dim=10) +def _ackley_ctor(self, dim=10): + """ + Constructs a Ackley problem (Box-Constrained Continuous Single-Objective) - * dim: problem dimension - """ + USAGE: problem.ackley(dim=10) - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + * dim: problem dimension + """ + + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) ackley._orig_init = ackley.__init__ ackley.__init__ = _ackley_ctor -def _schwefel_ctor(self,dim = 10): - """ - Constructs a Schwefel problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.schwefel(dim=10) +def _schwefel_ctor(self, dim=10): + """ + Constructs a Schwefel problem (Box-Constrained Continuous Single-Objective) + + USAGE: problem.schwefel(dim=10) - * dim: problem dimension - """ + * dim: problem dimension + """ - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) schwefel._orig_init = schwefel.__init__ schwefel.__init__ = _schwefel_ctor -def _dejong_ctor(self,dim = 10): - """ - Constructs a De Jong problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.dejong(dim=10) +def _dejong_ctor(self, dim=10): + """ + Constructs a De Jong problem (Box-Constrained Continuous Single-Objective) + + USAGE: problem.dejong(dim=10) - * dim: problem dimension - """ + * dim: problem dimension + """ - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) dejong._orig_init = dejong.__init__ dejong.__init__ = _dejong_ctor -def _griewank_ctor(self,dim = 10): - """ - Constructs a Griewank problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.griewank(dim=10) +def 
_griewank_ctor(self, dim=10): + """ + Constructs a Griewank problem (Box-Constrained Continuous Single-Objective) + + USAGE: problem.griewank(dim=10) - * dim: problem dimension - """ + * dim: problem dimension + """ - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) griewank._orig_init = griewank.__init__ griewank.__init__ = _dejong_ctor -def _lennard_jones_ctor(self,n_atoms = 4): - """ - Constructs a Lennard-Jones problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.lennard_jones(n_atoms=4) +def _lennard_jones_ctor(self, n_atoms=4): + """ + Constructs a Lennard-Jones problem (Box-Constrained Continuous Single-Objective) - * n_atoms: number of atoms - """ + USAGE: problem.lennard_jones(n_atoms=4) - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(n_atoms) - self._orig_init(*arg_list) + * n_atoms: number of atoms + """ + + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(n_atoms) + self._orig_init(*arg_list) lennard_jones._orig_init = lennard_jones.__init__ lennard_jones.__init__ = _lennard_jones_ctor + def _branin_ctor(self): - """ - Constructs a Branin problem (Box-Constrained Continuous Single-Objective) + """ + Constructs a Branin problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.branin() + USAGE: problem.branin() - """ - self._orig_init() + """ + self._orig_init() branin._orig_init = branin.__init__ branin.__init__ = _branin_ctor def _himmelblau_ctor(self): - """ - Constructs a Himmelblau problem (Box-Constrained Continuous Single-Objective) + """ + Constructs a Himmelblau problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.himmelblau() + USAGE: problem.himmelblau() - """ - self._orig_init() + """ + self._orig_init() himmelblau._orig_init = himmelblau.__init__ himmelblau.__init__ = _himmelblau_ctor + def _bukin_ctor(self): - """ - Constructs a Bukin's f6 problem (Box-Constrained Continuous Single-Objective) + """ + Constructs a Bukin's f6 problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.bukin() + USAGE: problem.bukin() - """ - self._orig_init() + """ + self._orig_init() bukin._orig_init = bukin.__init__ bukin.__init__ = _bukin_ctor -def _michalewicz_ctor(self,dim = 10): - """ - Constructs a Michalewicz problem (Box-Constrained Continuous Single-Objective) - USAGE: problem.michalewicz(dim=5) +def _michalewicz_ctor(self, dim=10): + """ + Constructs a Michalewicz problem (Box-Constrained Continuous Single-Objective) + + USAGE: problem.michalewicz(dim=5) - NOTE: Minimum is -4.687 for dim=5 and -9.66 for dim = 10 + NOTE: Minimum is -4.687 for dim=5 and -9.66 for dim = 10 - * dim: problem dimension - """ + * dim: problem dimension + """ - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(dim) - self._orig_init(*arg_list) + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(dim) + self._orig_init(*arg_list) michalewicz._orig_init = michalewicz.__init__ michalewicz.__init__ = _michalewicz_ctor -def _kur_ctor(self,dim = 10): - """ - Constructs a Kursawe's study problem (Box-Constrained Continuous 
-def _kur_ctor(self,dim = 10):
-    """
-    Constructs a Kursawe's study problem (Box-Constrained Continuous Multi-Objective)
-    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002
+def _kur_ctor(self, dim=10):
+    """
+    Constructs a Kursawe's study problem (Box-Constrained Continuous Multi-Objective)
+
+    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002

-    USAGE: problem.kur(dim = 10)
+    USAGE: problem.kur(dim = 10)

-    * dim: problem dimension
-    """
-    arg_list=[]
-    arg_list.append(dim)
-    self._orig_init(*arg_list)
+    * dim: problem dimension
+    """
+    arg_list = []
+    arg_list.append(dim)
+    self._orig_init(*arg_list)
 kur._orig_init = kur.__init__
 kur.__init__ = _kur_ctor
+
 def _fon_ctor(self):
-    """
-    Constructs a Fonseca and Fleming's study problem (Box-Constrained Continuous Multi-Objective)
+    """
+    Constructs a Fonseca and Fleming's study problem (Box-Constrained Continuous Multi-Objective)

-    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002
+    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002

-    USAGE: problem.fon()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.fon()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 fon._orig_init = fon.__init__
 fon.__init__ = _fon_ctor
+
 def _pol_ctor(self):
-    """
-    Constructs a Poloni's study study problem (Box-Constrained Continuous Multi-Objective)
+    """
+    Constructs a Poloni's study problem (Box-Constrained Continuous Multi-Objective)

-    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002
+    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002

-    USAGE: problem.pol()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.pol()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 pol._orig_init = pol.__init__
 pol.__init__ = _pol_ctor
+
 def _sch_ctor(self):
-    """
-    Constructs a Schaffer's study problem (Box-Constrained Continuous Multi-Objective)
+    """
+    Constructs a Schaffer's study problem (Box-Constrained Continuous Multi-Objective)

-    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002
+    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002

-    USAGE: problem.sch()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.sch()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 sch._orig_init = sch.__init__
 sch.__init__ = _sch_ctor
+
 def _pressure_vessel_ctor(self):
-    """
-    Constructs a pressure vessel design problem (Constrained Continuous Single-Objective)
+    """
+    Constructs a pressure vessel design problem (Constrained Continuous Single-Objective)

-    USAGE: problem.pressure_vessel()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.pressure_vessel()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 pressure_vessel._orig_init = pressure_vessel.__init__
 pressure_vessel.__init__ = _pressure_vessel_ctor
+
 def _tens_comp_string_ctor(self):
-    """
-    Constructs a tension compression string design problem (Constrained Continuous Single-Objective)
+    """
+    Constructs a tension compression string design problem (Constrained Continuous Single-Objective)

-    USAGE: problem.tens_comp_string()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.tens_comp_string()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 tens_comp_string._orig_init = tens_comp_string.__init__
 tens_comp_string.__init__ = _tens_comp_string_ctor
+
 def _welded_beam_ctor(self):
-    """
-    Constructs a welded beam design problem (Constrained Continuous Single-Objective)
+    """
+    Constructs a welded beam design problem (Constrained Continuous Single-Objective)

-    USAGE: problem.welded_beam()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.welded_beam()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 welded_beam._orig_init = welded_beam.__init__
 welded_beam.__init__ = _welded_beam_ctor
+
 def _cec2006_ctor(self, prob_id=1):
-    """
-    Constructs one of the 24 CEC2006 Competition Problems (Constrained Continuous Single-Objective)
+    """
+    Constructs one of the 24 CEC2006 Competition Problems (Constrained Continuous Single-Objective)

-    USAGE: problem.cec2006(prob_id=1)
+    USAGE: problem.cec2006(prob_id=1)

-    * prob_id: Problem number, one of [1,2,...24]
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(prob_id)
-    self._orig_init(*arg_list)
+    * prob_id: Problem number, one of [1,2,...24]
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(prob_id)
+    self._orig_init(*arg_list)
 cec2006._orig_init = cec2006.__init__
 cec2006.__init__ = _cec2006_ctor
+
 def _cec2009_ctor(self, prob_id=1, dim=30, is_constrained=False):
-    """
-    Constructs one of the 20 CEC2009 Competition Problems (Constrained / Unconstrained Multi-Objective)
-
-    USAGE: problem.cec2009(prob_id=1, dim=30, is_constrained=False)
-
-    * prob_id: Problem number, one of [1,2,...10]
-    * dim: Problem's dimension (default is 30, corresponding to the competition set-up)
-    * is_constrained: if True constructs the CF problems, otherwise the UF (constrained/unconstrained)
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(prob_id)
-    arg_list.append(dim)
-    arg_list.append(is_constrained)
-    self._orig_init(*arg_list)
+    """
+    Constructs one of the 20 CEC2009 Competition Problems (Constrained / Unconstrained Multi-Objective)
+
+    USAGE: problem.cec2009(prob_id=1, dim=30, is_constrained=False)
+
+    * prob_id: Problem number, one of [1,2,...10]
+    * dim: Problem's dimension (default is 30, corresponding to the competition set-up)
+    * is_constrained: if True constructs the CF problems, otherwise the UF (constrained/unconstrained)
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(prob_id)
+    arg_list.append(dim)
+    arg_list.append(is_constrained)
+    self._orig_init(*arg_list)
 cec2009._orig_init = cec2009.__init__
 cec2009.__init__ = _cec2009_ctor
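As a quick illustration of the competition-problem wrappers above, a sketch built from their USAGE lines (assuming the CEC suites were compiled into the extension; the variable names are illustrative):

    from PyGMO import problem

    g04 = problem.cec2006(prob_id=4)                       # constrained, single-objective
    uf1 = problem.cec2009(prob_id=1, dim=30)               # unconstrained UF1
    cf1 = problem.cec2009(prob_id=1, is_constrained=True)  # constrained CF1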
+
 def _cec2013_ctor(self, prob_id=1, dim=10, path="input_data/"):
-    """
-    Constructs one of the 28 CEC2013 Competition Problems (Box-Constrained Continuous Single-Objective)
+    """
+    Constructs one of the 28 CEC2013 Competition Problems (Box-Constrained Continuous Single-Objective)

-    NOTE: this problem requires two files to be put in the path indicated: "M_Dxx.txt" and "shift_data.txt".
-    These files can be downloaded from the CEC2013 competition site: http://web.mysites.ntu.edu.sg/epnsugan/PublicSite/Shared%20Documents/CEC2013/cec13-c-code.zip
+    NOTE: this problem requires two files to be put in the path indicated: "M_Dxx.txt" and "shift_data.txt".
+    These files can be downloaded from the CEC2013 competition site: http://web.mysites.ntu.edu.sg/epnsugan/PublicSite/Shared%20Documents/CEC2013/cec13-c-code.zip

-    USAGE: problem.cec2013(dim = 10, prob_id=1, path="input_data/")
+    USAGE: problem.cec2013(dim = 10, prob_id=1, path="input_data/")

-    * prob_id: Problem number, one of [1,2,...10]
-    * dim: Problem's dimension (default is 10)
-    * path: Whether the problem is constrained or unconstrained
+    * prob_id: Problem number, one of [1,2,...28]
+    * dim: Problem's dimension (default is 10)
+    * path: path to the directory containing the "M_Dxx.txt" and "shift_data.txt" input files

-    """
-    arg_list=[]
-    arg_list.append(prob_id)
-    arg_list.append(dim)
-    arg_list.append(path)
-    self._orig_init(*arg_list)
+    """
+    arg_list = []
+    arg_list.append(prob_id)
+    arg_list.append(dim)
+    arg_list.append(path)
+    self._orig_init(*arg_list)
 cec2013._orig_init = cec2013.__init__
 cec2013.__init__ = _cec2013_ctor

-def _luksan_vlcek_1_ctor(self,dim = 3):
-    """
-    Constructs the first Luksan Vlcek problem (Constrained Continuous Single-Objective)
-    NOTE: L. Luksan and J. Vlcek, "Sparse and Parially Separable Test Problems for Unconstrained and Equality Constrained Optimization"
+def _luksan_vlcek_1_ctor(self, dim=3):
+    """
+    Constructs the first Luksan Vlcek problem (Constrained Continuous Single-Objective)
+
+    NOTE: L. Luksan and J. Vlcek, "Sparse and Partially Separable Test Problems for Unconstrained and Equality Constrained Optimization"

-    USAGE: problem.luksan_vlcek_1(dim=3)
+    USAGE: problem.luksan_vlcek_1(dim=3)

-    * dim: problem dimension
-    """
+    * dim: problem dimension
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(dim)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(dim)
+    self._orig_init(*arg_list)
 luksan_vlcek_1._orig_init = luksan_vlcek_1.__init__
 luksan_vlcek_1.__init__ = _luksan_vlcek_1_ctor

-def _luksan_vlcek_2_ctor(self,dim = 16):
-    """
-    Constructs the second Luksan Vlcek problem (Constrained Continuous Single-Objective)
-    NOTE: L. Luksan and J. Vlcek, "Sparse and Parially Separable Test Problems for Unconstrained and Equality Constrained Optimization"
+def _luksan_vlcek_2_ctor(self, dim=16):
+    """
+    Constructs the second Luksan Vlcek problem (Constrained Continuous Single-Objective)
+
+    NOTE: L. Luksan and J. Vlcek, "Sparse and Partially Separable Test Problems for Unconstrained and Equality Constrained Optimization"

-    USAGE: problem.luksan_vlcek_2(dim=16)
+    USAGE: problem.luksan_vlcek_2(dim=16)

-    * dim: problem dimension
-    """
+    * dim: problem dimension
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(dim)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(dim)
+    self._orig_init(*arg_list)
 luksan_vlcek_2._orig_init = luksan_vlcek_2.__init__
 luksan_vlcek_2.__init__ = _luksan_vlcek_2_ctor

-def _luksan_vlcek_3_ctor(self,dim = 8):
-    """
-    Constructs the third Luksan Vlcek problem (Constrained Continuous Single-Objective)
-    NOTE: L. Luksan and J. Vlcek, "Sparse and Parially Separable Test Problems for Unconstrained and Equality Constrained Optimization"
+def _luksan_vlcek_3_ctor(self, dim=8):
+    """
+    Constructs the third Luksan Vlcek problem (Constrained Continuous Single-Objective)
+
+    NOTE: L. Luksan and J. Vlcek, "Sparse and Partially Separable Test Problems for Unconstrained and Equality Constrained Optimization"

-    USAGE: problem.luksan_vlcek_3(dim=8)
+    USAGE: problem.luksan_vlcek_3(dim=8)

-    * dim: problem dimension
-    """
+    * dim: problem dimension
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(dim)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(dim)
+    self._orig_init(*arg_list)
 luksan_vlcek_3._orig_init = luksan_vlcek_3.__init__
 luksan_vlcek_3.__init__ = _luksan_vlcek_3_ctor
+
 def _snopt_toyprob_ctor(self):
-    """
-    Constructs SNOPT toy-problem (Box-Constrained Continuous Multi-Objective)
+    """
+    Constructs SNOPT toy-problem (Box-Constrained Continuous Multi-Objective)

-    USAGE: problem.snopt_toyprob()
-    """
-    arg_list=[]
-    self._orig_init(*arg_list)
+    USAGE: problem.snopt_toyprob()
+    """
+    arg_list = []
+    self._orig_init(*arg_list)
 snopt_toyprob._orig_init = snopt_toyprob.__init__
 snopt_toyprob.__init__ = _snopt_toyprob_ctor

-def _string_match_ctor(self,string = "Can we use it for space?"):
-    """
-    Constructs a string-match problem (Box-Constrained Integer Single-Objective)
-    NOTE: This is the problem of matching a string. Transcribed as an optimization problem
+def _string_match_ctor(self, string="Can we use it for space?"):
+    """
+    Constructs a string-match problem (Box-Constrained Integer Single-Objective)
+
+    NOTE: This is the problem of matching a string. Transcribed as an optimization problem

-    USAGE: problem.string_match(string = "mah")
+    USAGE: problem.string_match(string = "mah")

-    * string: string to match
-    """
+    * string: string to match
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(string)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(string)
+    self._orig_init(*arg_list)
 string_match._orig_init = string_match.__init__
 string_match.__init__ = _string_match_ctor

-def _golomb_ruler_ctor(self,order = 5, length=10):
-    """
-    Constructs a Golomb Ruler problem (Constrained Integer Single-Objective)
-    NOTE: see http://en.wikipedia.org/wiki/Golomb_ruler
+def _golomb_ruler_ctor(self, order=5, length=10):
+    """
+    Constructs a Golomb Ruler problem (Constrained Integer Single-Objective)
+
+    NOTE: see http://en.wikipedia.org/wiki/Golomb_ruler

-    USAGE: problem.golomb_ruler(order = 5, length=10)
+    USAGE: problem.golomb_ruler(order = 5, length=10)

-    * order: order of the Golomb ruler
-    * length: length of the Golomb ruler
-    """
+    * order: order of the Golomb ruler
+    * length: length of the Golomb ruler
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(order)
-    arg_list.append(length)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(order)
+    arg_list.append(length)
+    self._orig_init(*arg_list)
 golomb_ruler._orig_init = golomb_ruler.__init__
 golomb_ruler.__init__ = _golomb_ruler_ctor
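The integer problems above follow the same kwargs pattern; for instance (a sketch based on the USAGE lines, with illustrative argument values):

    from PyGMO import problem

    sm = problem.string_match(string="hello")      # one integer gene per character
    gr = problem.golomb_ruler(order=5, length=11)  # constrained integer problem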
-def _tsp_ctor(self,matrix = [[0,1,2],[1,0,5],[2,5,0]]):
-    """
-    Constructs a Travelling Salesman problem (Constrained Integer Single-Objective)
-    USAGE: problem.tsp(matrix = [0,1,2],[1,0,5],[2,5,0])
+def _tsp_ctor(self, matrix=[[0, 1, 2], [1, 0, 5], [2, 5, 0]]):
+    """
+    Constructs a Travelling Salesman problem (Constrained Integer Single-Objective)
+
+    USAGE: problem.tsp(matrix = [[0,1,2],[1,0,5],[2,5,0]])

-    * matrix: inter-city distances (symmetric matrix)
-    """
+    * matrix: inter-city distances (symmetric matrix)
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(matrix)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(matrix)
+    self._orig_init(*arg_list)
 tsp._orig_init = tsp.__init__
 tsp.__init__ = _tsp_ctor

-def _knapsack_ctor(self,values = [1,2,3,4,5], weights = [10, 40, 30, 50, 20], max_weight = 100):
-    """
-    Constructs a 0-1 Knapsack Problem (Constrained Integer Single-Objective)
-    USAGE: problem.knapsack(values = [1,2,3,4,5], weights = [10, 40, 30, 50, 20], max_weight = 100)
+def _knapsack_ctor(
+        self, values=[1, 2, 3, 4, 5],
+        weights=[10, 40, 30, 50, 20], max_weight=100):
+    """
+    Constructs a 0-1 Knapsack Problem (Constrained Integer Single-Objective)
+
+    USAGE: problem.knapsack(values = [1,2,3,4,5], weights = [10, 40, 30, 50, 20], max_weight = 100)

-    * values: raw array of values
-    * weights: raw array of weights
-    * max_weight: maximum weight
-    """
+    * values: raw array of values
+    * weights: raw array of weights
+    * max_weight: maximum weight
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(values)
-    arg_list.append(weights)
-    arg_list.append(max_weight)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(values)
+    arg_list.append(weights)
+    arg_list.append(max_weight)
+    self._orig_init(*arg_list)
 knapsack._orig_init = knapsack.__init__
 knapsack.__init__ = _knapsack_ctor

-def _inventory_ctor(self, weeks = 4, sample_size = 10, seed = 0):
-    """
-    Constructs an Inventory Problem (Stochastic Objective Function)
-    NOTE: see www2.isye.gatech.edu/people/faculty/Alex_Shapiro/SPbook.pdf
+def _inventory_ctor(self, weeks=4, sample_size=10, seed=0):
+    """
+    Constructs an Inventory Problem (Stochastic Objective Function)
+
+    NOTE: see www2.isye.gatech.edu/people/faculty/Alex_Shapiro/SPbook.pdf

-    USAGE: problem.inventory(weeks = 4, sample_size = 10, seed = 0):
+    USAGE: problem.inventory(weeks = 4, sample_size = 10, seed = 0)

-    * week: dimension of the problem corresponding to the numer of weeks
-    to plan the inventory for.
-    * sample_size: dimension of the sample used to approximate the expected value
-    * seed: integer used as starting random seed to build the
-    pseudorandom sequences used to generate the sample
-    """
+    * weeks: dimension of the problem corresponding to the number of weeks
+    to plan the inventory for.
+    * sample_size: dimension of the sample used to approximate the expected value
+    * seed: integer used as starting random seed to build the
+    pseudorandom sequences used to generate the sample
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(weeks)
-    arg_list.append(sample_size)
-    arg_list.append(seed)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(weeks)
+    arg_list.append(sample_size)
+    arg_list.append(seed)
+    self._orig_init(*arg_list)
 inventory._orig_init = inventory.__init__
 inventory.__init__ = _inventory_ctor

-def _normalized_ctor(self, problem = None):
-    """
-    Normalizes a problem (e.g. maps all variables to [-1,1])
-    NOTE: this meta-problem constructs a new problem having normalized bounds/variables
+def _normalized_ctor(self, problem=None):
+    """
+    Normalizes a problem (e.g. maps all variables to [-1,1])
+
+    NOTE: this meta-problem constructs a new problem having normalized bounds/variables

-    USAGE: problem.normalized(problem=PyGMO.ackley(1))
+    USAGE: problem.normalized(problem=PyGMO.ackley(1))

-    * problem: PyGMO problem one wants to normalize
+    * problem: PyGMO problem one wants to normalize

-    """
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem=ackley(1)
-    arg_list.append(problem)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    if problem is None:
+        problem = ackley(1)
+    arg_list.append(problem)
+    self._orig_init(*arg_list)
 normalized._orig_init = normalized.__init__
 normalized.__init__ = _normalized_ctor
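A short sketch of the combinatorial and meta-problem constructors just shown (instances and values are illustrative):

    from PyGMO import problem

    ks = problem.knapsack(values=[1, 2, 3, 4, 5],
                          weights=[10, 40, 30, 50, 20], max_weight=100)
    norm = problem.normalized(problem.ackley(5))  # variables mapped to [-1, 1]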
-_problem.decompose.WEIGHTED = _problem._decomposition_method.WEIGHTED
-_problem.decompose.BI = _problem._decomposition_method.BI
-_problem.decompose.TCHEBYCHEFF = _problem._decomposition_method.TCHEBYCHEFF
-def _decompose_ctor(self, problem = None, method = decompose.WEIGHTED, weights = [], z = []):
-    """
-    Implements a meta-problem class resulting in a decomposed version
-    of the multi-objective input problem, i.e. a single-objective problem
-    having as fitness function some kind of combination of the original fitness functions.
-
-    NOTE: this meta-problem constructs a new single-objective problem
-
-    USAGE: problem.decompose(problem=PyGMO.zdt(1, 2), method = problem.decompose.WEIGHTED, weights=a random vector (summing to one), z= a zero vector)
-
-    * problem: PyGMO problem one wants to decompose
-    * method: the decomposition method to use (WEIGHTED, TCHEBYCHEEF or BI)
-    * weights: the weight vector to build the new fitness function
-    * z: the reference point (used in TCHEBYCHEEF and BI methods)
-
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem=zdt(1,2)
-    arg_list.append(problem)
-    arg_list.append(method)
-    arg_list.append(weights)
-    arg_list.append(z)
-    self._orig_init(*arg_list)
+
+def _decompose_ctor(
+        self,
+        problem=None,
+        method='tchebycheff',
+        weights=[],
+        z=[]):
+    """
+    Implements a meta-problem class resulting in a decomposed version
+    of the multi-objective input problem, i.e. a single-objective problem
+    having as fitness function some kind of combination of the original fitness functions.
+
+    NOTE: this meta-problem constructs a new single-objective problem
+
+    USAGE: problem.decompose(problem=PyGMO.zdt(1, 2), method = 'tchebycheff', weights=a random vector (summing to one), z= a zero vector)
+
+    * problem: PyGMO problem one wants to decompose
+    * method: the decomposition method to use ('weighted', 'tchebycheff' or 'bi')
+    * weights: the weight vector to build the new fitness function
+    * z: the reference point (used in the 'tchebycheff' and 'bi' methods)
+
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+
+    def decomposition_type(x):
+        return {
+            'weighted': _problem._decomposition_method.WEIGHTED,
+            'tchebycheff': _problem._decomposition_method.TCHEBYCHEFF,
+            'bi': _problem._decomposition_method.BI,
+        }[x]
+
+    arg_list = []
+    if problem is None:
+        problem = zdt(1, 2)
+    arg_list.append(problem)
+    arg_list.append(decomposition_type(method.lower()))
+    arg_list.append(weights)
+    arg_list.append(z)
+    self._orig_init(*arg_list)
 decompose._orig_init = decompose.__init__
 decompose.__init__ = _decompose_ctor

-def _shifted_ctor(self, problem = None, shift = None):
-    """
-    Shifts a problem.
-    NOTE: this meta-problem constructs a new problem where the objective function will be f(x+b),
-    where b is the shift (bounds are also chaged accordingly)
+def _shifted_ctor(self, problem=None, shift=None):
+    """
+    Shifts a problem.
+
+    NOTE: this meta-problem constructs a new problem where the objective function will be f(x+b),
+    where b is the shift (bounds are also changed accordingly)

-    USAGE: problem.shifted(problem=PyGMO.ackley(1), shift = a random vector)
+    USAGE: problem.shifted(problem=PyGMO.ackley(1), shift = a random vector)

-    * problem: PyGMO problem one wants to shift
-    * shift: a value or a list containing the shifts. By default, a radnom shift is created within the problem bounds
+    * problem: PyGMO problem one wants to shift
+    * shift: a value or a list containing the shifts. By default, a random shift is created within the problem bounds

-    """
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem=ackley(1)
-    arg_list.append(problem)
-    if shift != None:
-        arg_list.append(shift)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    if problem is None:
+        problem = ackley(1)
+    arg_list.append(problem)
+    if shift is not None:
+        arg_list.append(shift)
+    self._orig_init(*arg_list)
 shifted._orig_init = shifted.__init__
 shifted.__init__ = _shifted_ctor

-def _rotated_ctor(self, problem = None, rotation = None):
-    """
-    Rotates a problem. (also reflections are possible)
-    The new objective function will be f(Rx_{normal}), where R is an orthogonal matrix and x_{normal}
-    is the decision vector normailized to [-1,1]
-    NOTE: To ensure all of the original space is included in the new box-constrained search space, bounds
-    of the normalized variables are expanded to [-sqrt(2),sqrt(2)]. It is still guaranteed theat the original
-    objective function will not be called outside of the original bounds by projecting points outside the original
-    space onto the boundary
+def _rotated_ctor(self, problem=None, rotation=None):
+    """
+    Rotates a problem. (also reflections are possible)
+    The new objective function will be f(Rx_{normal}), where R is an orthogonal matrix and x_{normal}
+    is the decision vector normalized to [-1,1]
+
+    NOTE: To ensure all of the original space is included in the new box-constrained search space, bounds
+    of the normalized variables are expanded to [-sqrt(2),sqrt(2)]. It is still guaranteed that the original
+    objective function will not be called outside of the original bounds by projecting points outside the original
+    space onto the boundary

-    USAGE: problem.rotated(problem=PyGMO.ackley(1), rotation = a random orthogonal matrix)
+    USAGE: problem.rotated(problem=PyGMO.ackley(1), rotation = a random orthogonal matrix)

-    * problem: PyGMO problem one wants to rotate
-    * rotation: a list of lists (matrix). If not specified, a random orthogonal matrix is used.
+    * problem: PyGMO problem one wants to rotate
+    * rotation: a list of lists (matrix). If not specified, a random orthogonal matrix is used.

-    """
+    """

-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem=ackley(1)
-    arg_list.append(problem)
-    if rotation != None:
-        arg_list.append(rotation)
-    self._orig_init(*arg_list)
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    if problem is None:
+        problem = ackley(1)
+    arg_list.append(problem)
+    if rotation is not None:
+        arg_list.append(rotation)
+    self._orig_init(*arg_list)
 rotated._orig_init = rotated.__init__
 rotated.__init__ = _rotated_ctor
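The meta-problems above compose freely, since each wrapper is itself a problem. A hedged sketch, using the method strings documented in the new decompose ctor:

    from PyGMO import problem

    mono = problem.decompose(problem.zdt(1, 2), method='tchebycheff')
    moved = problem.shifted(problem.ackley(5))   # random shift by default
    turned = problem.rotated(problem.ackley(5))  # random orthogonal rotation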
 _problem.noisy.noise_distribution = _problem._noise_distribution

-def _noisy_ctor(self, problem = None, trials = 1, param_first = 0.0, param_second = 1.0, noise_type = noisy.noise_distribution.NORMAL, seed = 0):
-    """
-    Inject noise to a problem.
-    The new objective function will become stochastic, influence by a normally distributed noise.
-
-    USAGE: problem.noisy(problem=PyGMO.ackley(1), trials = 1, param_first=0.0, param_second=1.0, noise_type = problem.noisy.noise_distribution.NORMAL, seed=0)
-
-    * problem: PyGMO problem on which one wants to add noises
-    * trials: number of trials to average around
-    * param_first: Mean of the Gaussian noise / Lower bound of the uniform noise
-    * param_second: Standard deviation of the Gaussian noise / Upper bound of the uniform noise
-    * noise_type: Whether to inject a normally distributed noise or uniformly distributed noise
-    * seed: Seed for the underlying RNG
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem = ackley(1)
-    arg_list.append(problem)
-    arg_list.append(trials)
-    arg_list.append(param_first)
-    arg_list.append(param_second)
-    arg_list.append(noise_type)
-    arg_list.append(seed)
-    self._orig_init(*arg_list)
+
+
+def _noisy_ctor(
+        self,
+        problem=None,
+        trials=1,
+        param_first=0.0,
+        param_second=1.0,
+        noise_type=noisy.noise_distribution.NORMAL,
+        seed=0):
+    """
+    Inject noise to a problem.
+    The new objective function will become stochastic, influenced by a normally distributed noise.
+
+    USAGE: problem.noisy(problem=PyGMO.ackley(1), trials = 1, param_first=0.0, param_second=1.0, noise_type = problem.noisy.noise_distribution.NORMAL, seed=0)
+
+    * problem: PyGMO problem on which one wants to add noises
+    * trials: number of trials to average around
+    * param_first: Mean of the Gaussian noise / Lower bound of the uniform noise
+    * param_second: Standard deviation of the Gaussian noise / Upper bound of the uniform noise
+    * noise_type: Whether to inject a normally distributed noise or uniformly distributed noise
+    * seed: Seed for the underlying RNG
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    if problem is None:
+        problem = ackley(1)
+    arg_list.append(problem)
+    arg_list.append(trials)
+    arg_list.append(param_first)
+    arg_list.append(param_second)
+    arg_list.append(noise_type)
+    arg_list.append(seed)
+    self._orig_init(*arg_list)
 noisy._orig_init = noisy.__init__
 noisy.__init__ = _noisy_ctor

-def _robust_ctor(self, problem = None, trials = 1, rho = 0.1, seed = 0):
+
+def _robust_ctor(self, problem=None, trials=1, rho=0.1, seed=0):
     """
     Inject noise to a problem in the decision space.
     The solution to the resulting problem is robust to the noise in the rho area.
@@ -699,8 +802,8 @@ def _robust_ctor(self, problem = None, trials = 1, rho = 0.1, seed = 0):
     * rho: Parameter controlling the magnitude of noise
     * seed: Seed for the underlying RNG
     """
-    arg_list=[]
-    if problem == None:
+    arg_list = []
+    if problem is None:
         problem = ackley(10)
     arg_list.append(problem)
     arg_list.append(trials)
@@ -713,32 +816,34 @@ def _robust_ctor(self, problem = None, trials = 1, rho = 0.1, seed = 0):

 # Renaming and placing the enums
 _problem.death_penalty.method = _problem._death_method_type

-def _death_penalty_ctor(self, problem = None, method = None, penalty_factors = None):
-    """
-    Implements a meta-problem class that wraps some other constrained problems, resulting in death penalty constraints handling.
-    Three implementations of the death penalty are available. The first one is the most common simple death penalty. The second one is the death
-    penalty defined by Angel Kuri Morales et al. (Kuri Morales, A. and Quezada, C.C. A Universal eclectic genetic algorithm for constrained optimization, Proceedings 6th European Congress on Intelligent Techniques & Soft Computing, EUFIT'98, 518-522, 1998.)
-    Simple death penalty penalizes the fitness function with a high value, Kuri method penalizes the
-    fitness function according to the rate of satisfied constraints. The third one is a weighted static penalization.
-    It penalizes the objective with the sum of the constraints violation, each one penalized with a given factor.
-
-    USAGE: problem.death_penalty(problem=PyGMO.cec2006(4), method=death_penalty.method.SIMPLE)
-
-    * problem: PyGMO constrained problem one wants to treat with a death penalty approach
-    * method: Simple death method set with SIMPLE, Kuri method set with KURI, weighted static penalization with WEIGHTED
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem = cec2006(4)
-    if method == None:
-        method = death_penalty.method.SIMPLE
-    arg_list.append(problem)
-    arg_list.append(method)
-    if penalty_factors != None:
-        arg_list.append(penalty_factors)
-    self._orig_init(*arg_list)
+
+def _death_penalty_ctor(self, problem=None, method=None, penalty_factors=None):
+    """
+    Implements a meta-problem class that wraps some other constrained problems, resulting in death penalty constraints handling.
+    Three implementations of the death penalty are available. The first one is the most common simple death penalty. The second one is the death
+    penalty defined by Angel Kuri Morales et al. (Kuri Morales, A. and Quezada, C.C. A Universal eclectic genetic algorithm for constrained optimization, Proceedings 6th European Congress on Intelligent Techniques & Soft Computing, EUFIT'98, 518-522, 1998.)
+    The simple death penalty penalizes the fitness function with a high value, while the Kuri method penalizes the
+    fitness function according to the rate of satisfied constraints. The third one is a weighted static penalization.
+    It penalizes the objective with the sum of the constraint violations, each one penalized with a given factor.
+
+    USAGE: problem.death_penalty(problem=PyGMO.cec2006(4), method=death_penalty.method.SIMPLE)
+
+    * problem: PyGMO constrained problem one wants to treat with a death penalty approach
+    * method: Simple death method set with SIMPLE, Kuri method set with KURI, weighted static penalization with WEIGHTED
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    if problem is None:
+        problem = cec2006(4)
+    if method is None:
+        method = death_penalty.method.SIMPLE
+    arg_list.append(problem)
+    arg_list.append(method)
+    if penalty_factors is not None:
+        arg_list.append(penalty_factors)
+    self._orig_init(*arg_list)
 death_penalty._orig_init = death_penalty.__init__
 death_penalty.__init__ = _death_penalty_ctor
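For the stochastic and constraint-handling wrappers above, a usage sketch (noise and penalty settings are illustrative; whether `robust` is re-exported under `problem` depends on the build):

    from PyGMO import problem

    fuzzy = problem.noisy(problem.ackley(5), trials=5,
                          param_first=0.0, param_second=0.3)
    tough = problem.robust(problem.ackley(10), trials=10, rho=0.05)
    dp = problem.death_penalty(problem.cec2006(4))  # SIMPLE method by default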
@@ -746,37 +851,43 @@ def _death_penalty_ctor(self, problem = None, method = None, penalty_factors = N

 # Renaming and placing the enums
 _problem.con2mo.method = _problem._con2mo_method_type

-def _con2mo_ctor(self, problem = None, method = None):
-    """
-    Implements a meta-problem class that wraps some other constrained problems,
-    resulting in multi-objective problem.
-
-    Three implementations of the constrained to multi-objective are available. For a problem with m constraints,
-    m+1 objective functions, the first objective function is the original objective function.
-    The first implementation is the constrained to multi-objective defined by Coello Coello. The
-    objectives defined from constraints includes number of violated constraints and objective functions.
-    The second implementation is the COMOGA multi-objective problem: a biobjective problem with the second
-    objective the sum of the violations of the constraints.
-    The third implementation is the same as the second one but splitting the sum of violations between equality
-    and inequality constraints, resulting in a total of three objectives problem.
-
-    USAGE: problem.con2mo(problem=PyGMO.cec2006(4), method=con2mo.method.OBJ_CSTRS)
-
-    * problem: original PyGMO constrained problem
-    * method: Coello constraints to multi-objective set with OBJ_CSTRS, COMOGA method
-    set with OBJ_CSTRSVIO and COMOGA with splitting of inequality and equality
-    constraints set with OBJ_EQVIO_INEQVIO
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem = cec2006(4)
-    if method == None:
-        method = con2mo.method.OBJ_CSTRS
-    arg_list.append(problem)
-    arg_list.append(method)
-    self._orig_init(*arg_list)
+
+def _con2mo_ctor(self, problem=None, method='obj_cstrsvio'):
+    """
+    Transforms a constrained problem into a multi-objective problem.
+
+    Three implementations of the constrained to multi-objective are available.
+    1) 'obj_cstrs': The multi-objective problem is created with two objectives. The first
+    objective is the same as that of the input problem, the second is the number of violated constraints
+    2) 'obj_cstrsvio': The multi-objective problem is created with two objectives. The first
+    objective is the same as that of the input problem, the second is the norm of the total constraint violation
+    3) 'obj_eqvio_ineqvio': The multi-objective problem is created with three objectives. The first
+    objective is the same as that of the input problem, the second is the norm of the total equality constraint violation,
+    the third is the norm of the total inequality constraint violation.
+
+    USAGE: problem.con2mo(problem=PyGMO.cec2006(4), method='obj_cstrsvio')
+
+    * problem: original PyGMO constrained problem
+    * method: one of 'obj_cstrsvio', 'obj_eqvio_ineqvio', 'obj_cstrs'
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+
+    def method_type(x):
+        return {
+            'obj_cstrs': _problem._con2mo_method_type.OBJ_CSTRS,
+            'obj_cstrsvio': _problem._con2mo_method_type.OBJ_CSTRSVIO,
+            'obj_eqvio_ineqvio': _problem._con2mo_method_type.OBJ_EQVIO_INEQVIO,
+        }[x]
+
+    arg_list = []
+    if problem is None:
+        problem = cec2006(4)
+    method = method_type(method.lower())
+    arg_list.append(problem)
+    arg_list.append(method)
+    self._orig_init(*arg_list)
 con2mo._orig_init = con2mo.__init__
 con2mo.__init__ = _con2mo_ctor
@@ -785,33 +896,56 @@ def _con2mo_ctor(self, problem = None, method = None):

 # Renaming and placing the enums
 _problem.con2uncon.method = _problem._con2uncon_method_type

-def _con2uncon_ctor(self, problem = None, method = None):
-    """
-    Implements a meta-problem class that wraps constrained problems,
-    resulting in an unconstrained problem. Two methods
-    are available for definig the objective function of the meta-problem: OPTIMALITY and FEASIBILITY.
-    The OPTIMALITY uses as objective function the original objective function, it basically removes the constraints from the original problem. The
-    FEASIBILITY uses as objective function the sum of the violation of the constraints, the meta-problem hence optimize just the level of infeasibility.
-
-    Implements a meta-problem class that wraps some other constrained problems,
-    resulting in multi-objective problem.
-
-    USAGE: problem.con2uncon(problem=PyGMO.cec2006(4), method=con2uncon.method.OPTIMALITY)
-
-    * problem: original PyGMO constrained problem
-    * method: OPTIMALITY uses the objective function of the original problem. The FEASIBILITY computes the sum of the constraints violation
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    if problem == None:
-        problem = cec2006(4)
-    if method == None:
-        method = con2uncon.method.OPTIMALITY
-    arg_list.append(problem)
-    arg_list.append(method)
-    self._orig_init(*arg_list)
+
+def _con2uncon_ctor(self, problem=None, method=None):
+    """
+    Implements a meta-problem class that wraps constrained problems,
+    resulting in an unconstrained problem. Two methods
+    are available for defining the objective function of the meta-problem: OPTIMALITY and FEASIBILITY.
+    OPTIMALITY uses the original objective function, basically removing the constraints from the original problem.
+    FEASIBILITY uses the sum of the constraint violations as objective function; the meta-problem hence optimizes just the level of infeasibility.
+
+    USAGE: problem.con2uncon(problem=PyGMO.cec2006(4), method=con2uncon.method.OPTIMALITY)
+
+    * problem: original PyGMO constrained problem
+    * method: OPTIMALITY uses the objective function of the original problem. FEASIBILITY computes the sum of the constraint violations
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    if problem is None:
+        problem = cec2006(4)
+    if method is None:
+        method = con2uncon.method.OPTIMALITY
+    arg_list.append(problem)
+    arg_list.append(method)
+    self._orig_init(*arg_list)
 con2uncon._orig_init = con2uncon.__init__
 con2uncon.__init__ = _con2uncon_ctor
+
+def _quadrature_encoding_ctor(self, problem=schwefel(1), idx=[0]):
+    """
+    Quadrature encoding problem. Transforms genes that encode angles to
+    quadrature encoding. The resulting problem has two genes (i,j) for every
+    transformed gene (x) of the original problem:
+        i = sin(x)
+        j = cos(x)
+    The sin component remains at the position of the original gene. The cos
+    component is added to the end of the chromosome.
+
+    USAGE: problem.quadrature_encoding(problem=schwefel(1), idx=[0])
+
+    * problem: original problem
+    * idx: indices of genes in original chromosome to be transformed
+    """
+    arg_list = [problem, idx]
+    self._orig_init(*arg_list)
+
+quadrature_encoding._orig_init = quadrature_encoding.__init__
+quadrature_encoding.__init__ = _quadrature_encoding_ctor
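A sketch of the constraint-transformation wrappers just defined (argument values are illustrative):

    from PyGMO import problem

    mo = problem.con2mo(problem.cec2006(4), method='obj_cstrsvio')  # 2 objectives
    unc = problem.con2uncon(problem.cec2006(4))                     # OPTIMALITY default
    qe = problem.quadrature_encoding(problem.schwefel(1), idx=[0])  # (sin x, cos x) genes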
diff --git a/PyGMO/problem/_base.py b/PyGMO/problem/_base.py
index c24c47e9..c6b4afe3 100644
--- a/PyGMO/problem/_base.py
+++ b/PyGMO/problem/_base.py
@@ -1,32 +1,39 @@
-from _problem import _base
+from PyGMO.problem._problem import _base
+
 class base(_base):
-    """
-    This class is the base class for all non-stochastic optimization problems. When defining an optimization problem
-    in PyGMO, the user needs to write a class that inherits from this base class and needs to call its constructor.
-    He will then need to re-implement a number of virtual functions that define the problem objectives and constraints,
-    as well as defining the box-bounds on the decision vector.
-    """
-    def __init__(self,*args):
-        """
-        Base problem constructor. It must be called from within the derived class constructor __init__()
-
-        USAGE: super(derived_class_name,self).__init__(dim, i_dim, n_obj, c_dim, c_ineq_dim, c_tol)
-
-        * dim: Total dimension of the decision vector
-        * i_dim: dimension of the integer part of decision vector (the integer part is placed at the end of the decision vector). Defaults to 0
-        * n_obj: number of objectives. Defaults to 1
-        * c_dim: total dimension of the constraint vector. dDefaults to 0
-        * c_ineq_dim: dimension of the inequality part of the constraint vector (inequality const. are placed at the end of the decision vector). Defaults to 0
-        * c_tol: constraint tolerance. When comparing individuals, this tolerance is used to decide whether a constraint is considered satisfied.
-        """
-        if len(args) == 0:
-            raise ValueError("Cannot initialise base problem without parameters for the constructor.")
-        _base.__init__(self,*args)
-    def _get_typename(self):
-        return str(type(self))
-    def __get_deepcopy__(self):
-        from copy import deepcopy
-        return deepcopy(self)
-    def get_name(self):
-        return self._get_typename()
+
+    """
+    This class is the base class for all non-stochastic optimization problems. When defining an optimization problem
+    in PyGMO, the user needs to write a class that inherits from this base class and needs to call its constructor.
+    The user will then need to re-implement a number of virtual functions that define the problem objectives and constraints,
+    as well as defining the box-bounds on the decision vector.
+    """
+
+    def __init__(self, *args):
+        """
+        Base problem constructor. It must be called from within the derived class constructor __init__()
+
+        USAGE: super(derived_class_name,self).__init__(dim, i_dim, n_obj, c_dim, c_ineq_dim, c_tol)
+
+        * dim: Total dimension of the decision vector
+        * i_dim: dimension of the integer part of decision vector (the integer part is placed at the end of the decision vector). Defaults to 0
+        * n_obj: number of objectives. Defaults to 1
+        * c_dim: total dimension of the constraint vector. Defaults to 0
+        * c_ineq_dim: dimension of the inequality part of the constraint vector (inequality constraints are placed at the end of the constraint vector). Defaults to 0
+        * c_tol: constraint tolerance. When comparing individuals, this tolerance is used to decide whether a constraint is considered satisfied.
+        """
+        if len(args) == 0:
+            raise ValueError(
+                "Cannot initialise base problem without parameters for the constructor.")
+        _base.__init__(self, *args)
+
+    def _get_typename(self):
+        return str(type(self))
+
+    def __get_deepcopy__(self):
+        from copy import deepcopy
+        return deepcopy(self)
+
+    def get_name(self):
+        return self._get_typename()
diff --git a/PyGMO/problem/_base_class.py b/PyGMO/problem/_base_class.py
index 64d3b27c..e48d277c 100644
--- a/PyGMO/problem/_base_class.py
+++ b/PyGMO/problem/_base_class.py
@@ -1,14 +1,20 @@
-from _problem import _base
+from PyGMO.problem._problem import _base
+
 class base(_base):
-    def __init__(self,*args):
-        if len(args) == 0:
-            raise ValueError("Cannot initialise base problem without parameters for the constructor.")
-        _base.__init__(self,*args)
-    def _get_typename(self):
-        return str(type(self))
-    def __get_deepcopy__(self):
-        from copy import deepcopy
-        return deepcopy(self)
-    def get_name(self):
-        return self._get_typename()
+
+    def __init__(self, *args):
+        if len(args) == 0:
+            raise ValueError(
+                "Cannot initialise base problem without parameters for the constructor.")
+        _base.__init__(self, *args)
+
+    def _get_typename(self):
+        return str(type(self))
+
+    def __get_deepcopy__(self):
+        from copy import deepcopy
+        return deepcopy(self)
+
+    def get_name(self):
+        return self._get_typename()
diff --git a/PyGMO/problem/_base_stochastic.py b/PyGMO/problem/_base_stochastic.py
index 51fc5b0e..a597ee6c 100644
--- a/PyGMO/problem/_base_stochastic.py
+++ b/PyGMO/problem/_base_stochastic.py
@@ -1,14 +1,20 @@
-from _problem import _base_stochastic
+from PyGMO.problem._problem import _base_stochastic
+
 class base_stochastic(_base_stochastic):
-    def __init__(self,*args):
-        if len(args) == 0:
-            raise ValueError("Cannot initialise base problem without parameters for the constructor.")
-        _base_stochastic.__init__(self,*args)
-    def _get_typename(self):
-        return str(type(self))
-    def __get_deepcopy__(self):
-        from copy import deepcopy
-        return deepcopy(self)
-    def get_name(self):
-        return self._get_typename()
+
+    def __init__(self, *args):
+        if len(args) == 0:
+            raise ValueError(
+                "Cannot initialise base problem without parameters for the constructor.")
+        _base_stochastic.__init__(self, *args)
+
+    def _get_typename(self):
+        return str(type(self))
+
+    def __get_deepcopy__(self):
+        from copy import deepcopy
+        return deepcopy(self)
+
+    def get_name(self):
+        return self._get_typename()
diff --git a/PyGMO/problem/_example.py b/PyGMO/problem/_example.py
index b8bfcefb..f656f8f2 100644
--- a/PyGMO/problem/_example.py
+++ b/PyGMO/problem/_example.py
@@ -1,67 +1,76 @@
-from _base import base
+from PyGMO.problem._base import base
+
 class py_example(base):
-    """
-    De Jong (sphere) function implemented purely in Python.
-
-    USAGE: py_example(dim = 10)
-
-    * dim problem dimension
-    """
-    def __init__(self, dim = 10):
-        #First we call the constructor of the base class telling
-        #essentially to PyGMO what kind of problem to expect (1 objective, 0 contraints etc.)
-        super(py_example,self).__init__(dim)
-
-        #then we set the problem bounds (in this case equal for all components)
-        self.set_bounds(-5.12,5.12)
-
-        #and we define some additional 'private' data members (not really necessary in
-        #this case, but ... hey this is a tutorial)
-        self.__dim = dim
-
-    #We reimplement the virtual method that defines the objective function.
-    def _objfun_impl(self,x):
-        f = 0;
-        for i in range(self.__dim):
-            f = f + (x[i])*(x[i])
-        #note that we return a tuple with one element only. In PyGMO the objective functions
-        #return tuples so that multi-objective optimization is also possible.
-        return (f,)
-
-    #Finally we also reimplement a virtual method that adds some output to the __repr__ method
-    def human_readable_extra(self):
-        return "\n\t Problem dimension: " + str(self.__dim)
+
+    """
+    De Jong (sphere) function implemented purely in Python.
+
+    USAGE: py_example(dim = 10)
+
+    * dim problem dimension
+    """
+
+    def __init__(self, dim=10):
+        # First we call the constructor of the base class telling
+        # essentially to PyGMO what kind of problem to expect (1 objective, 0
+        # constraints etc.)
+        super(py_example, self).__init__(dim)
+
+        # then we set the problem bounds (in this case equal for all
+        # components)
+        self.set_bounds(-5.12, 5.12)
+
+        # and we define some additional 'private' data members (not really necessary in
+        # this case, but ... hey this is a tutorial)
+        self.__dim = dim
+
+    # We reimplement the virtual method that defines the objective function.
+    def _objfun_impl(self, x):
+        f = 0
+        for i in range(self.__dim):
+            f = f + (x[i]) * (x[i])
+        # note that we return a tuple with one element only. In PyGMO the objective functions
+        # return tuples so that multi-objective optimization is also possible.
+        return (f,)
+
+    # Finally we also reimplement a virtual method that adds some output to
+    # the __repr__ method
+    def human_readable_extra(self):
+        return "\n\t Problem dimension: " + str(self.__dim)
+
 class py_example_max(base):
+
     """
     Analytical function to maximize

     USAGE: py_example_max()
     """

     def __init__(self):
-        # first we call the constructor of the base telling
-        # to PyGMO what kind of problem to expect (1 objective, 0 constraints etc...)
-        super(py_example_max,self).__init__(2);
-
+        # first we call the constructor of the base telling
+        # to PyGMO what kind of problem to expect (1 objective, 0 constraints
+        # etc...)
+        super(py_example_max, self).__init__(2)
+
         # sets the problem bounds
-        self.set_bounds(-10,10);
-
+        self.set_bounds(-10, 10)
+
         # define private data members
-        self.__dim = 2;
-
+        self.__dim = 2
+
         # initialize best known solutions
-        self.best_x = [[1.,-1.]];
-
+        self.best_x = [[1., -1.]]
+
     # reimplement the virtual method that defines the objective function
-    def _objfun_impl(self,x):
-        f = ( - (1. - x[0])**2 - 100 * (-x[0]**2 - x[1])**2 - 1.);
+    def _objfun_impl(self, x):
+        f = (- (1. - x[0]) ** 2 - 100 * (-x[0] ** 2 - x[1]) ** 2 - 1.)
         return(f,)
-
+
     # reimplement the virtual method that compares fitnesses
-    def _compare_fitness_impl(self,f1,f2):
-        return f1[0]>f2[0];
+    def _compare_fitness_impl(self, f1, f2):
+        return f1[0] > f2[0]

     # add some output to __repr__
     def human_readable_extra(self):
diff --git a/PyGMO/problem/_example_stochastic.py b/PyGMO/problem/_example_stochastic.py
index cfd48465..e8e45e34 100644
--- a/PyGMO/problem/_example_stochastic.py
+++ b/PyGMO/problem/_example_stochastic.py
@@ -1,43 +1,48 @@
-from _base_stochastic import base_stochastic
+from PyGMO.problem._base_stochastic import base_stochastic
+
 class py_example_stochastic(base_stochastic):
-    """
-    Noisy De Jong (sphere) function implemented purely in Python.
-
-    USAGE: py_example_stochastic(dim = 10, seed=0)
-
-    * dim problem dimension
-    * seed initial random seed
-    """
-    def __init__(self, dim = 10, seed = 0):
-        #First we call the constructor of the base stochastic class. (Only
-        #unconstrained single objective problems can be stochastic in PyGMO)
-        super(py_example_stochastic,self).__init__(dim, seed)
-
-        #then we set the problem bounds (in this case equal for all components)
-        self.set_bounds(-5.12,5.12)
-
-        #and we define some additional 'private' data members (not really necessary in
-        #this case, but ... hey this is a tutorial)
-        self.__dim = dim
-
-    def _objfun_impl(self,x):
-        from random import random as drng
-        from random import seed
-
-        #We initialize the random number generator using the
-        #data member seed (in base_stochastic). This will be changed by suitable
-        #algorithms when a stochastic problem is used. The mod operation avoids overflows
-
-        seed(self.seed)
-
-        #We write the objfun using the same pseudorandonm sequence
-        #as long as self.seed is unchanged.
-        f = 0;
-        for i in range(self.__dim):
-            noise = (2 * drng() - 1) / 10
-            f = f + (x[i] + noise)*(x[i] + noise)
-        return (f,)
-    def human_readable_extra(self):
-        return "\n\tSeed: " + str(self.seed)
+
+    """
+    Noisy De Jong (sphere) function implemented purely in Python.
+
+    USAGE: py_example_stochastic(dim = 10, seed=0)
+
+    * dim problem dimension
+    * seed initial random seed
+    """
+
+    def __init__(self, dim=10, seed=0):
+        # First we call the constructor of the base stochastic class. (Only
+        # unconstrained single objective problems can be stochastic in PyGMO)
+        super(py_example_stochastic, self).__init__(dim, seed)
+
+        # then we set the problem bounds (in this case equal for all
+        # components)
+        self.set_bounds(-5.12, 5.12)
+
+        # and we define some additional 'private' data members (not really necessary in
+        # this case, but ... hey this is a tutorial)
+        self.__dim = dim
+
+    def _objfun_impl(self, x):
+        from random import random as drng
+        from random import seed
+
+        # We initialize the random number generator using the
+        # data member seed (in base_stochastic). This will be changed by suitable
+        # algorithms when a stochastic problem is used. The mod operation
+        # avoids overflows
+
+        seed(self.seed)
+
+        # We write the objfun using the same pseudorandom sequence
+        # as long as self.seed is unchanged.
+        f = 0
+        for i in range(self.__dim):
+            noise = (2 * drng() - 1) / 10
+            f = f + (x[i] + noise) * (x[i] + noise)
+        return (f,)
+
+    def human_readable_extra(self):
+        return "\n\tSeed: " + str(self.seed)
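Because the objective reseeds the generator from `self.seed`, repeated evaluations at the same point return the same noisy value until the seed changes. A sketch (assuming py_example_stochastic is re-exported under PyGMO.problem and that problems expose an `objfun` method):

    from PyGMO import problem

    prob = problem.py_example_stochastic(dim=10, seed=123)
    x = [0.1] * 10
    print(prob.objfun(x))  # same seed, same point ...
    print(prob.objfun(x))  # ... hence the same noisy value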
diff --git a/PyGMO/problem/_gtop.py b/PyGMO/problem/_gtop.py
index 1e966aa2..1c338644 100644
--- a/PyGMO/problem/_gtop.py
+++ b/PyGMO/problem/_gtop.py
@@ -1,161 +1,179 @@
-from _problem import cassini_1, gtoc_1,cassini_2, rosetta, messenger_full, tandem, laplace, sagas, mga_1dsm_alpha, mga_1dsm_tof, mga_incipit, mga_incipit_cstrs, mga_part
-#from _problem import _gtoc_2_objective
-
-# Redefining the constructors of all problems to obtain good documentation and allowing kwargs
-def _cassini_1_ctor(self, objectives = 1):
-    """
-    Constructs a Cassini 1 Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
-    Its single objective version has a global minimum at 4.9307 [km/s],
-    and it is a deceptive problem with a larger minimum at 5.303 [km/s]
-
-    USAGE: problem.cassini_1(objectives = 1)
-
-    * objectives: number of objectives. 1=DV, 2=DV,DT
-
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(objectives)
-    self._orig_init(*arg_list)
+from PyGMO.problem._problem import cassini_1, gtoc_1, cassini_2, rosetta, messenger_full, tandem, laplace, sagas, mga_1dsm_alpha, mga_1dsm_tof, mga_incipit, mga_incipit_cstrs, mga_part
+#from _problem import _gtoc_2_objective
+
+# Redefining the constructors of all problems to obtain good documentation
+# and allowing kwargs
+
+
+def _cassini_1_ctor(self, objectives=1):
+    """
+    Constructs a Cassini 1 Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
+    Its single objective version has a global minimum at 4.9307 [km/s],
+    and it is a deceptive problem with a larger minimum at 5.303 [km/s]
+
+    USAGE: problem.cassini_1(objectives = 1)
+
+    * objectives: number of objectives. 1=DV, 2=DV,DT
+
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(objectives)
+    self._orig_init(*arg_list)
 cassini_1._orig_init = cassini_1.__init__
 cassini_1.__init__ = _cassini_1_ctor
+
 def _gtoc_1_ctor(self):
-    """
-    Constructs a GTOC 1 Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
-
-    Best known global minima is at -1,581,950
-
-    USAGE: problem.gtoc_1()
-
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    self._orig_init(*arg_list)
+    """
+    Constructs a GTOC 1 Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
+
+    Best known global minimum is at -1,581,950
+
+    USAGE: problem.gtoc_1()
+
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    self._orig_init(*arg_list)
 gtoc_1._orig_init = gtoc_1.__init__
 gtoc_1.__init__ = _gtoc_1_ctor
+
 def _cassini_2_ctor(self):
-    """
-    Constructs a Cassini 2 Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
-    It models the same interplanetary trajectory as the cassini_1 problem, but
-    in a more accurate fashion, allowing deep space manouvres
-
-    Best known global minimum is at 8.383 [km/s]
-
-    USAGE: problem.cassini_2()
-
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    self._orig_init(*arg_list)
+    """
+    Constructs a Cassini 2 Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
+    It models the same interplanetary trajectory as the cassini_1 problem, but
+    in a more accurate fashion, allowing deep space manoeuvres
+
+    Best known global minimum is at 8.383 [km/s]
+
+    USAGE: problem.cassini_2()
+
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    self._orig_init(*arg_list)
 cassini_2._orig_init = cassini_2.__init__
 cassini_2.__init__ = _cassini_2_ctor
+
 def _rosetta_ctor(self):
-    """
-    Constructs a Rosetta Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
-
-    Best known global minimum is at 1.343 [km/s]
-
-    USAGE: problem.rosetta()
-
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    self._orig_init(*arg_list)
+    """
+    Constructs a Rosetta Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
+
+    Best known global minimum is at 1.343 [km/s]
+
+    USAGE: problem.rosetta()
+
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    self._orig_init(*arg_list)
 rosetta._orig_init = rosetta.__init__
 rosetta.__init__ = _rosetta_ctor
+
 def _messenger_full_ctor(self):
-    """
-    Constructs a Mesenger Full Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
-
-    Best known global minimum is at 2.113
-
-    USAGE: problem.messenger_full()
-
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    self._orig_init(*arg_list)
+    """
+    Constructs a Messenger Full Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
+
+    Best known global minimum is at 2.113
+
+    USAGE: problem.messenger_full()
+
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    self._orig_init(*arg_list)
 messenger_full._orig_init = messenger_full.__init__
 messenger_full.__init__ = _messenger_full_ctor
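The GTOP trajectory problems above take no arguments beyond what their docstrings state; a sketch built directly from the USAGE lines:

    from PyGMO import problem

    cas = problem.cassini_1()                 # best known minimum ~4.9307 km/s
    cas_mo = problem.cassini_1(objectives=2)  # DV, DT bi-objective variant
    mess = problem.messenger_full()           # best known minimum ~2.113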
-def _laplace_ctor(self, seq = [3,2,3,3,5]):
-    """
-    Constructs a EJSM-Laplace Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA-1DSM) is similar to TandEM, but targets Jupiter and the user
-    can specify explicitly the planetary fly-by sequence
-
-    USAGE: problem.laplace(seq = [3,2,3,3,5])
-
-    * seq: The planetary sequence. This is a list of ints that represent the planets to visit
-    1 - Mercury, 2 - Venus, 3 - Earth, 4 - Mars, 5 - Jupiter, 6 - Saturn. It must start from 3 (Earth)
-    and end with 5 (Jupiter)
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(seq)
-    self._orig_init(*arg_list)
+
+def _laplace_ctor(self, seq=[3, 2, 3, 3, 5]):
+    """
+    Constructs an EJSM-Laplace Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA-1DSM) is similar to TandEM, but targets Jupiter and the user
+    can specify explicitly the planetary fly-by sequence
+
+    USAGE: problem.laplace(seq = [3,2,3,3,5])
+
+    * seq: The planetary sequence. This is a list of ints that represent the planets to visit
+      1 - Mercury, 2 - Venus, 3 - Earth, 4 - Mars, 5 - Jupiter, 6 - Saturn. It must start from 3 (Earth)
+      and end with 5 (Jupiter)
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(seq)
+    self._orig_init(*arg_list)
laplace._orig_init = laplace.__init__
laplace.__init__ = _laplace_ctor
+
def _sagas_ctor(self):
-    """
-    Constructs a SAGAS Problem (Box-Constrained Continuous Single-Objective)
-
-    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
-
-    USAGE: problem.sagas()
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    self._orig_init(*arg_list)
+    """
+    Constructs a SAGAS Problem (Box-Constrained Continuous Single-Objective)
+
+    NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
+
+    USAGE: problem.sagas()
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    self._orig_init(*arg_list)
sagas._orig_init = sagas.__init__
sagas.__init__ = _sagas_ctor

#gtoc_2.obj = _gtoc_2_objective
-def _gtoc_2_ctor(self, ast1 = 815, ast2 = 300, ast3 = 110, ast4 = 47, n_seg = 10, objective = gtoc_2.obj.MASS_TIME):
+# def _gtoc_2_ctor(self, ast1 = 815, ast2 = 300, ast3 = 110, ast4 = 47, n_seg = 10, objective = gtoc_2.obj.MASS_TIME):
#     """
#     Constructs a GTOC 2 Problem (Constrained Continuous Single-Objective)
#
@@ -173,8 +191,8 @@ def _sagas_ctor(self):
#     * n_seg number of segments to be used per leg
#     * obj objective function in the enum {MASS,TIME,MASS_TIME}
#     """
-#
-#     # We construct the arg list for the original constructor exposed by boost_python
+#
+#     We construct the arg list for the original constructor exposed by boost_python
#     arg_list=[]
#     arg_list.append(ast1)
#     arg_list.append(ast2)
@@ -187,417 +205,724 @@ def _sagas_ctor(self):
#gtoc_2.__init__ = _gtoc_2_ctor

-
from PyKEP import planet_ss, epoch, planet_js
-def
_mga_1dsm_alpha_ctor(self, seq = [planet_ss('earth'),planet_ss('venus'),planet_ss('earth')], t0 = [epoch(0),epoch(1000)], tof = [365.25,5.0 * 365.25], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True): - """ - Constructs an mga_1dsm problem (alpha-encoding) - - USAGE: problem.mga_1dsm(seq = [planet_ss('earth'),planet_ss('venus'),planet_ss('earth')], t0 = [epoch(0),epoch(1000)], tof = [365.25,5.0 * 365.25], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True) - - * seq: list of PyKEP planets defining the encounter sequence (including the starting planet) - * t0: list of two epochs defining the launch window - * tof: list of two floats defining the minimum and maximum allowed mission length (days) - * vinf: list of two floats defining the minimum and maximum allowed initial hyperbolic velocity at launch (km/sec) - * multi_objective: when True constructs a multiobjective problem (dv, T) - * add_vinf_dep: when True the computed Dv includes the initial hyperbolic velocity (at launch) - * add_vinf_arr: when True the computed Dv includes the final hyperbolic velocity (at arrival) - """ - - # We construct the arg list for the original constructor exposed by boost_python - arg_list=[] - arg_list.append(seq) - arg_list.append(t0[0]) - arg_list.append(t0[1]) - arg_list.append(tof[0]) - arg_list.append(tof[1]) - arg_list.append(vinf[0]) - arg_list.append(vinf[1]) - arg_list.append(multi_objective) - arg_list.append(add_vinf_dep) - arg_list.append(add_vinf_arr) - self._orig_init(*arg_list) + +def _mga_1dsm_alpha_ctor( + self, seq=[planet_ss('earth'), planet_ss('venus'), planet_ss('earth')], + t0=[epoch(0), epoch(1000)], tof=[365.25, 5.0 * 365.25], vinf=[0.5, + 2.5], multi_objective=False, add_vinf_dep=False, add_vinf_arr=True): + """ + Constructs an mga_1dsm problem (alpha-encoding) + + USAGE: problem.mga_1dsm(seq = [planet_ss('earth'),planet_ss('venus'),planet_ss('earth')], t0 = [epoch(0),epoch(1000)], tof = [365.25,5.0 * 365.25], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True) + + * seq: list of PyKEP planets defining the encounter sequence (including the starting planet) + * t0: list of two epochs defining the launch window + * tof: list of two floats defining the minimum and maximum allowed mission length (days) + * vinf: list of two floats defining the minimum and maximum allowed initial hyperbolic velocity at launch (km/sec) + * multi_objective: when True constructs a multiobjective problem (dv, T) + * add_vinf_dep: when True the computed Dv includes the initial hyperbolic velocity (at launch) + * add_vinf_arr: when True the computed Dv includes the final hyperbolic velocity (at arrival) + """ + + # We construct the arg list for the original constructor exposed by + # boost_python + arg_list = [] + arg_list.append(seq) + arg_list.append(t0[0]) + arg_list.append(t0[1]) + arg_list.append(tof[0]) + arg_list.append(tof[1]) + arg_list.append(vinf[0]) + arg_list.append(vinf[1]) + arg_list.append(multi_objective) + arg_list.append(add_vinf_dep) + arg_list.append(add_vinf_arr) + self._orig_init(*arg_list) mga_1dsm_alpha._orig_init = mga_1dsm_alpha.__init__ mga_1dsm_alpha.__init__ = _mga_1dsm_alpha_ctor -def _mga_1dsm_tof_ctor(self, seq = [planet_ss('earth'),planet_ss('venus'),planet_ss('earth')], t0 = [epoch(0),epoch(1000)], tof = [ [50, 900], [50, 900] ], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True): - """ - Constructs an mga_1dsm problem (tof-encoding) 
-
-    USAGE: problem.mga_1dsm(seq = [planet_ss('earth'),planet_ss('venus'),planet_ss('earth')], t0 = [epoch(0),epoch(1000)], tof = [ [50, 900], [50, 900] ], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True)
-
-    * seq: list of PyKEP planets defining the encounter sequence (including the starting planet)
-    * t0: list of two epochs defining the launch window
-    * tof: list of intervals defining the times of flight (days)
-    * vinf: list of two floats defining the minimum and maximum allowed initial hyperbolic velocity at launch (km/sec)
-    * multi_objective: when True constructs a multiobjective problem (dv, T)
-    * add_vinf_dep: when True the computed Dv includes the initial hyperbolic velocity (at launch)
-    * add_vinf_arr: when True the computed Dv includes the final hyperbolic velocity (at arrival)
-    """
-
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(seq)
-    arg_list.append(t0[0])
-    arg_list.append(t0[1])
-    arg_list.append(tof)
-    arg_list.append(vinf[0])
-    arg_list.append(vinf[1])
-    arg_list.append(multi_objective)
-    arg_list.append(add_vinf_dep)
-    arg_list.append(add_vinf_arr)
-    self._orig_init(*arg_list)
+def _mga_1dsm_tof_ctor(
+        self, seq=[planet_ss('earth'), planet_ss('venus'), planet_ss('earth')],
+        t0=[epoch(0), epoch(1000)], tof=[[50, 900], [50, 900]],
+        vinf=[0.5, 2.5], multi_objective=False, add_vinf_dep=False,
+        add_vinf_arr=True):
+    """
+    Constructs an mga_1dsm problem (tof-encoding)
+
+    USAGE: problem.mga_1dsm(seq = [planet_ss('earth'),planet_ss('venus'),planet_ss('earth')], t0 = [epoch(0),epoch(1000)], tof = [ [50, 900], [50, 900] ], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True)
+
+    * seq: list of PyKEP planets defining the encounter sequence (including the starting planet)
+    * t0: list of two epochs defining the launch window
+    * tof: list of intervals defining the times of flight (days)
+    * vinf: list of two floats defining the minimum and maximum allowed initial hyperbolic velocity at launch (km/sec)
+    * multi_objective: when True constructs a multiobjective problem (dv, T)
+    * add_vinf_dep: when True the computed Dv includes the initial hyperbolic velocity (at launch)
+    * add_vinf_arr: when True the computed Dv includes the final hyperbolic velocity (at arrival)
+    """
+
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(seq)
+    arg_list.append(t0[0])
+    arg_list.append(t0[1])
+    arg_list.append(tof)
+    arg_list.append(vinf[0])
+    arg_list.append(vinf[1])
+    arg_list.append(multi_objective)
+    arg_list.append(add_vinf_dep)
+    arg_list.append(add_vinf_arr)
+    self._orig_init(*arg_list)
mga_1dsm_tof._orig_init = mga_1dsm_tof.__init__
mga_1dsm_tof.__init__ = _mga_1dsm_tof_ctor
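A sketch of the tof-encoded constructor in use (assumes PyKEP is installed; the values mirror the defaults above):

    from PyGMO import problem
    from PyKEP import planet_ss, epoch
    seq = [planet_ss('earth'), planet_ss('venus'), planet_ss('earth')]
    prob = problem.mga_1dsm_tof(seq=seq, t0=[epoch(0), epoch(1000)],
                                tof=[[50, 900], [50, 900]],  # one [lower, upper] pair per leg
                                vinf=[0.5, 2.5])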
-def _mga_incipit_ctor(self, seq = [planet_js('io'),planet_js('io'),planet_js('europa')], t0 = [epoch(7305.0),epoch(11323.0)],tof = [[100,200],[3,200],[4,100]]):
-    """
-    USAGE: mga_incipit(seq = [planet_js('io'),planet_js('io'),planet_js('europa')], t0 = [epoch(6905.0),epoch(11323.0)], tof = [[100,200],[3,200],[4,100]])
-
-    * seq: list of jupiter moons defining the trajectory incipit
-    * t0: list of two epochs defining the launch window
-    * tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
-    """
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(seq)
-    arg_list.append(t0[0])
-    arg_list.append(t0[1])
-    arg_list.append(tof)
-    self._orig_init(*arg_list)
+def _mga_incipit_ctor(
+        self, seq=[planet_js('io'), planet_js('io'), planet_js('europa')],
+        t0=[epoch(7305.0), epoch(11323.0)],
+        tof=[[100, 200], [3, 200], [4, 100]]):
+    """
+    USAGE: mga_incipit(seq = [planet_js('io'),planet_js('io'),planet_js('europa')], t0 = [epoch(7305.0),epoch(11323.0)], tof = [[100,200],[3,200],[4,100]])
+
+    * seq: list of jupiter moons defining the trajectory incipit
+    * t0: list of two epochs defining the launch window
+    * tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
+    """
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(seq)
+    arg_list.append(t0[0])
+    arg_list.append(t0[1])
+    arg_list.append(tof)
+    self._orig_init(*arg_list)
mga_incipit._orig_init = mga_incipit.__init__
mga_incipit.__init__ = _mga_incipit_ctor

-def _mga_incipit_cstrs_ctor(self, seq = [planet_js('io'),planet_js('io'),planet_js('europa')], t0 = [epoch(7305.0),epoch(11323.0)],tof = [[100,200],[3,200],[4,100]], Tmax = 365.25, Dmin = 0.2):
-    """
-    USAGE: mga_incipit_cstrs(seq = [planet_js('io'),planet_js('io'),planet_js('europa')], t0 = [epoch(6905.0),epoch(11323.0)], tof = [[100,200],[3,200],[4,100]], Tmax = 365.25, Dmin = 0.2)
-
-    * seq: list of jupiter moons defining the trajectory incipit
-    * t0: list of two epochs defining the launch window
-    * tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
-    """
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(seq)
-    arg_list.append(t0[0])
-    arg_list.append(t0[1])
-    arg_list.append(tof)
-    arg_list.append(Tmax)
-    arg_list.append(Dmin)
-    self._orig_init(*arg_list)
+
+def _mga_incipit_cstrs_ctor(
+        self, seq=[planet_js('io'), planet_js('io'), planet_js('europa')],
+        t0=[epoch(7305.0), epoch(11323.0)],
+        tof=[[100, 200], [3, 200], [4, 100]], Tmax=300.00, Dmin=2.0):
+    """
+    USAGE: mga_incipit_cstrs(seq = [planet_js('io'),planet_js('io'),planet_js('europa')], t0 = [epoch(7305.0),epoch(11323.0)], tof = [[100,200],[3,200],[4,100]], Tmax = 300.00, Dmin = 2.0)
+
+    * seq: list of jupiter moons defining the trajectory incipit
+    * t0: list of two epochs defining the launch window
+    * tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
+    """
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(seq)
+    arg_list.append(t0[0])
+    arg_list.append(t0[1])
+    arg_list.append(tof)
+    arg_list.append(Tmax)
+    arg_list.append(Dmin)
+    self._orig_init(*arg_list)
mga_incipit_cstrs._orig_init = mga_incipit_cstrs.__init__
mga_incipit_cstrs.__init__ = _mga_incipit_cstrs_ctor
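Note that the new defaults (Tmax=300.00, Dmin=2.0) differ from the old ones (365.25, 0.2); the USAGE line above has been aligned with them. A usage sketch (assuming PyKEP; reading Tmax as a cap on the total flight time in days and Dmin as a minimum safe distance is my interpretation of the underlying C++ problem, not something stated here):

    from PyGMO import problem
    from PyKEP import planet_js, epoch
    prob = problem.mga_incipit_cstrs(
        seq=[planet_js('io'), planet_js('io'), planet_js('europa')],
        t0=[epoch(7305.0), epoch(11323.0)],
        tof=[[100, 200], [3, 200], [4, 100]],
        Tmax=300.00, Dmin=2.0)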
-def _mga_part_ctor(self, seq = [planet_js('europa'),planet_js('europa'),planet_js('europa')], tof = [[5,50],[5,50]], t0 = epoch(11000), v_inf_in = [1500.0,350.0,145.0]):
-    """
-    USAGE: mga_part(seq = [planet_js('europa'),planet_js('europa'),planet_js('europa')], tof = [[5,50],[5,50]], t0 = epoch(11000), v_inf_in[1500.0,350.0,145.0])
-
-    * seq: list of jupiter moons defining the trajectory incipit
-    * tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
-    * t0: starting epoch
-    * v_inf_in: Incoming spacecraft relative velocity
-    """
-    # We construct the arg list for the original constructor exposed by boost_python
-    arg_list=[]
-    arg_list.append(seq)
-    arg_list.append(tof)
-    arg_list.append(t0)
-    arg_list.append(v_inf_in)
-    self._orig_init(*arg_list)
+
+def _mga_part_ctor(
+        self, seq=[planet_js('europa'), planet_js('europa'), planet_js('europa')],
+        tof=[[5, 50], [5, 50]], t0=epoch(11000),
+        v_inf_in=[1500.0, 350.0, 145.0]):
+    """
+    USAGE: mga_part(seq = [planet_js('europa'),planet_js('europa'),planet_js('europa')], tof = [[5,50],[5,50]], t0 = epoch(11000), v_inf_in = [1500.0,350.0,145.0])
+
+    * seq: list of jupiter moons defining the trajectory part
+    * tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
+    * t0: starting epoch
+    * v_inf_in: Incoming spacecraft relative velocity
+    """
+    # We construct the arg list for the original constructor exposed by
+    # boost_python
+    arg_list = []
+    arg_list.append(seq)
+    arg_list.append(tof)
+    arg_list.append(t0)
+    arg_list.append(v_inf_in)
+    self._orig_init(*arg_list)
mga_part._orig_init = mga_part.__init__
mga_part.__init__ = _mga_part_ctor

-#Plot of the trajectory for an mga_1dsm problem
-def _mga_1dsm_alpha_plot(self,x):
-    """
-    Plots the trajectory represented by the decision vector x
-    """
-    import matplotlib as mpl
-    from mpl_toolkits.mplot3d import Axes3D
-    import matplotlib.pyplot as plt
-    from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
-    from PyKEP import epoch, propagate_lagrangian, lambert_problem,fb_prop, AU, MU_SUN, DAY2SEC
-    from math import pi, acos, cos, sin
-    from scipy.linalg import norm
-
-    mpl.rcParams['legend.fontsize'] = 10
-    fig = plt.figure()
-    ax = fig.gca(projection='3d')
-    ax.scatter(0,0,0, color='y')
-
-    seq = self.get_sequence()
-
-    n = (len(seq)-1)
-    #1 - we 'decode' the chromosome recording the various times of flight (days) in the list T
-    T = list([0]*(n))
-    alpha_sum = 0
-    for i in xrange(n):
-        T[i] = x[1]*x[6+4*i]
-        alpha_sum += x[6+4*i]
-    for i in xrange(n):
-        T[i] /= alpha_sum
-
-
-    #2 - We compute the epochs and ephemerides of the planetary encounters
-    t_P = list([None] * (n+1))
-    r_P = list([None] * (n+1))
-    v_P = list([None] * (n+1))
-    DV = list([None] * (n+1))
-
-    for i,planet in enumerate(seq):
-        t_P[i] = epoch(x[0] + sum(T[0:i]))
-        r_P[i],v_P[i] = planet.eph(t_P[i])
-        plot_planet(ax, planet, t0=t_P[i], color=(0.8,0.6,0.8), legend=True, units = AU)
-
-    #3 - We start with the first leg
-    theta = 2*pi*x[2]
-    phi = acos(2*x[3]-1)-pi/2
-
-    Vinfx = x[4]*cos(phi)*cos(theta)
-    Vinfy = x[4]*cos(phi)*sin(theta)
-    Vinfz = x[4]*sin(phi)
-
-
-    v0 = [a+b for a,b in zip(v_P[0],[Vinfx,Vinfy,Vinfz])]
-    r,v = propagate_lagrangian(r_P[0],v0,x[5]*T[0]*DAY2SEC,seq[0].mu_central_body)
-    plot_kepler(ax,r_P[0],v0,x[5]*T[0]*DAY2SEC,seq[0].mu_central_body,N = 100, color='b', legend=False, units = AU)
-
-    #Lambert arc to reach seq[1]
-    dt = (1-x[5])*T[0]*DAY2SEC
-    l = lambert_problem(r,r_P[1],dt,seq[0].mu_central_body)
-    plot_lambert(ax,l, sol = 0, color='r', legend=False, units = AU)
-    v_end_l = l.get_v2()[0]
-    v_beg_l = l.get_v1()[0]
-
-    #First DSM occuring at time nu1*T1
-    DV[0] = norm([a-b for a,b in zip(v_beg_l,v)])
-
-    #4 - And we proceed with each successive leg
-    for i in range(1,n):
-        #Fly-by
-        v_out = fb_prop(v_end_l,v_P[i],x[8+(i-1)*4]*seq[i].radius,x[7+(i-1)*4],seq[i].mu_self)
-        #s/c propagation before the DSM
-        r,v = propagate_lagrangian(r_P[i],v_out,x[9+(i-1)*4]*T[i]*DAY2SEC,seq[0].mu_central_body)
-        plot_kepler(ax,r_P[i],v_out,x[9+(i-1)*4]*T[i]*DAY2SEC,seq[0].mu_central_body,N = 100, color='b', legend=False, units = AU)
-        #Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
-        dt =
(1-x[9+(i-1)*4])*T[i]*DAY2SEC - l = lambert_problem(r,r_P[i+1],dt,seq[0].mu_central_body) - plot_lambert(ax,l, sol = 0, color='r', legend=False, units = AU) - v_end_l = l.get_v2()[0] - v_beg_l = l.get_v1()[0] - #DSM occurring at time nu2*T2 - DV[i] = norm([a-b for a,b in zip(v_beg_l,v)]) - return ax +# Plot of the trajectory for an mga_1dsm problem + + +def _mga_1dsm_alpha_plot(self, x): + """ + Plots the trajectory represented by the decision vector x + """ + import matplotlib as mpl + from mpl_toolkits.mplot3d import Axes3D + import matplotlib.pyplot as plt + from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler + from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC + from math import pi, acos, cos, sin + from scipy.linalg import norm + + mpl.rcParams['legend.fontsize'] = 10 + fig = plt.figure() + ax = fig.gca(projection='3d') + ax.scatter(0, 0, 0, color='y') + + seq = self.get_sequence() + + n = (len(seq) - 1) + # 1 - we 'decode' the chromosome recording the various times of flight + # (days) in the list T + T = list([0] * (n)) + alpha_sum = 0 + for i in range(n): + T[i] = x[1] * x[6 + 4 * i] + alpha_sum += x[6 + 4 * i] + for i in range(n): + T[i] /= alpha_sum + + # 2 - We compute the epochs and ephemerides of the planetary encounters + t_P = list([None] * (n + 1)) + r_P = list([None] * (n + 1)) + v_P = list([None] * (n + 1)) + DV = list([None] * (n + 1)) + + for i, planet in enumerate(seq): + t_P[i] = epoch(x[0] + sum(T[0:i])) + r_P[i], v_P[i] = planet.eph(t_P[i]) + plot_planet(ax, planet, t0=t_P[i], color=( + 0.8, 0.6, 0.8), legend=True, units = AU) + + # 3 - We start with the first leg + theta = 2 * pi * x[2] + phi = acos(2 * x[3] - 1) - pi / 2 + + Vinfx = x[4] * cos(phi) * cos(theta) + Vinfy = x[4] * cos(phi) * sin(theta) + Vinfz = x[4] * sin(phi) + + v0 = [a + b for a, b in zip(v_P[0], [Vinfx, Vinfy, Vinfz])] + r, v = propagate_lagrangian( + r_P[0], v0, x[5] * T[0] * DAY2SEC, seq[0].mu_central_body) + plot_kepler( + ax, + r_P[0], + v0, + x[5] * + T[0] * + DAY2SEC, + seq[0].mu_central_body, + N=100, + color='b', + legend=False, + units=AU) + + # Lambert arc to reach seq[1] + dt = (1 - x[5]) * T[0] * DAY2SEC + l = lambert_problem(r, r_P[1], dt, seq[0].mu_central_body) + plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU) + v_end_l = l.get_v2()[0] + v_beg_l = l.get_v1()[0] + + # First DSM occuring at time nu1*T1 + DV[0] = norm([a - b for a, b in zip(v_beg_l, v)]) + + # 4 - And we proceed with each successive leg + for i in range(1, n): + # Fly-by + v_out = fb_prop(v_end_l, + v_P[i], + x[8 + (i - 1) * 4] * seq[i].radius, + x[7 + (i - 1) * 4], + seq[i].mu_self) + # s/c propagation before the DSM + r, v = propagate_lagrangian( + r_P[i], v_out, x[9 + (i - 1) * 4] * T[i] * DAY2SEC, seq[0]. 
+ mu_central_body) + plot_kepler(ax, + r_P[i], + v_out, + x[9 + (i - 1) * 4] * T[i] * DAY2SEC, + seq[0].mu_central_body, + N=100, + color='b', + legend=False, + units=AU) + # Lambert arc to reach Earth during (1-nu2)*T2 (second segment) + dt = (1 - x[9 + (i - 1) * 4]) * T[i] * DAY2SEC + l = lambert_problem(r, r_P[i + 1], dt, seq[0].mu_central_body) + plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU) + v_end_l = l.get_v2()[0] + v_beg_l = l.get_v1()[0] + # DSM occurring at time nu2*T2 + DV[i] = norm([a - b for a, b in zip(v_beg_l, v)]) + return ax mga_1dsm_alpha.plot = _mga_1dsm_alpha_plot -#Plot of the trajectory for an mga_1dsm problem -def _mga_1dsm_tof_plot(self,x): - """ - Plots the trajectory represented by the decision vector x - """ - import matplotlib as mpl - from mpl_toolkits.mplot3d import Axes3D - import matplotlib.pyplot as plt - from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler - from PyKEP import epoch, propagate_lagrangian, lambert_problem,fb_prop, AU, MU_SUN, DAY2SEC - from math import pi, acos, cos, sin - from scipy.linalg import norm - - mpl.rcParams['legend.fontsize'] = 10 - fig = plt.figure() - ax = fig.gca(projection='3d') - ax.scatter(0,0,0, color='y') - - seq = self.get_sequence() - - n = (len(seq)-1) - #1 - we 'decode' the chromosome recording the various times of flight (days) in the list T - T = x[5::4] - - #2 - We compute the epochs and ephemerides of the planetary encounters - t_P = list([None] * (n+1)) - r_P = list([None] * (n+1)) - v_P = list([None] * (n+1)) - DV = list([None] * (n+1)) - - for i,planet in enumerate(seq): - t_P[i] = epoch(x[0] + sum(T[0:i])) - r_P[i],v_P[i] = planet.eph(t_P[i]) - plot_planet(ax, planet, t0=t_P[i], color=(0.8,0.6,0.8), legend=True, units = AU) - - #3 - We start with the first leg - theta = 2*pi*x[1] - phi = acos(2*x[2]-1)-pi/2 - - Vinfx = x[3]*cos(phi)*cos(theta) - Vinfy = x[3]*cos(phi)*sin(theta) - Vinfz = x[3]*sin(phi) - - v0 = [a+b for a,b in zip(v_P[0],[Vinfx,Vinfy,Vinfz])] - r,v = propagate_lagrangian(r_P[0],v0,x[4]*T[0]*DAY2SEC,seq[0].mu_central_body) - plot_kepler(ax,r_P[0],v0,x[4]*T[0]*DAY2SEC,seq[0].mu_central_body,N = 100, color='b', legend=False, units = AU) - - #Lambert arc to reach seq[1] - dt = (1-x[4])*T[0]*DAY2SEC - l = lambert_problem(r,r_P[1],dt,seq[0].mu_central_body) - plot_lambert(ax,l, sol = 0, color='r', legend=False, units = AU) - v_end_l = l.get_v2()[0] - v_beg_l = l.get_v1()[0] - - #First DSM occuring at time nu1*T1 - DV[0] = norm([a-b for a,b in zip(v_beg_l,v)]) - - #4 - And we proceed with each successive leg - for i in range(1,n): - #Fly-by - v_out = fb_prop(v_end_l,v_P[i],x[7+(i-1)*4]*seq[i].radius,x[6+(i-1)*4],seq[i].mu_self) - #s/c propagation before the DSM - r,v = propagate_lagrangian(r_P[i],v_out,x[8+(i-1)*4]*T[i]*DAY2SEC,seq[0].mu_central_body) - plot_kepler(ax,r_P[i],v_out,x[8+(i-1)*4]*T[i]*DAY2SEC,seq[0].mu_central_body,N = 100, color='b', legend=False, units = AU) - #Lambert arc to reach Earth during (1-nu2)*T2 (second segment) - dt = (1-x[8+(i-1)*4])*T[i]*DAY2SEC - l = lambert_problem(r,r_P[i+1],dt,seq[0].mu_central_body) - plot_lambert(ax,l, sol = 0, color='r', legend=False, units = AU) - v_end_l = l.get_v2()[0] - v_beg_l = l.get_v1()[0] - #DSM occurring at time nu2*T2 - DV[i] = norm([a-b for a,b in zip(v_beg_l,v)]) - return ax -mga_1dsm_tof.plot = _mga_1dsm_tof_plot - -#Plot of the trajectory of an mga_incipit problem -def _mga_incipit_plot(self,x, plot_leg_0 = False): - """ - Plots the trajectory represented by the decision vector x - - Example:: - - 
prob.plot(x) - """ - import matplotlib as mpl - from mpl_toolkits.mplot3d import Axes3D - import matplotlib.pyplot as plt - from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler - from PyKEP import epoch, propagate_lagrangian, lambert_problem,fb_prop, AU, MU_SUN, DAY2SEC - from math import pi, acos, cos, sin - from scipy.linalg import norm - - mpl.rcParams['legend.fontsize'] = 10 - fig = plt.figure() - ax = fig.gca(projection='3d',aspect='equal') - ax.scatter(0,0,0, color='y') - - JR = 71492000.0 - legs = len(x)/4 - seq = self.get_sequence() - common_mu = seq[0].mu_central_body - - #1 - we 'decode' the chromosome recording the various times of flight (days) in the list T - T = x[3::4] - - #2 - We compute the epochs and ephemerides of the planetary encounters - t_P = list([None] * legs) - r_P = list([None] * legs) - v_P = list([None] * legs) - DV = list([None] * legs) - - for i,planet in enumerate(seq): - t_P[i] = epoch(x[0]+sum(T[:i+1])) - r_P[i],v_P[i] = planet.eph(t_P[i]) - plot_planet(ax, planet, t0=t_P[i], color=(0.8,0.6,0.8), legend=True, units = JR) - - #3 - We start with the first leg: a lambert arc - theta = 2*pi*x[1] - phi = acos(2*x[2]-1)-pi/2 - r = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection - r = [JR*1000*d for d in r] - - l = lambert_problem(r,r_P[0],T[0]*DAY2SEC,common_mu, False, False) - if (plot_leg_0): - plot_lambert(ax,l, sol = 0, color='k', legend=False, units = JR, N=500) - - #Lambert arc to reach seq[1] - v_end_l = l.get_v2()[0] - v_beg_l = l.get_v1()[0] - - #4 - And we proceed with each successive leg - for i in xrange(1,legs): - #Fly-by - v_out = fb_prop(v_end_l,v_P[i-1],x[1+4*i]*seq[i-1].radius,x[4*i],seq[i-1].mu_self) - #s/c propagation before the DSM - r,v = propagate_lagrangian(r_P[i-1],v_out,x[4*i+2]*T[i]*DAY2SEC,common_mu) - plot_kepler(ax,r_P[i-1],v_out,x[4*i+2]*T[i]*DAY2SEC,common_mu,N = 500, color='b', legend=False, units = JR) - #Lambert arc to reach Earth during (1-nu2)*T2 (second segment) - dt = (1-x[4*i+2])*T[i]*DAY2SEC - l = lambert_problem(r,r_P[i],dt,common_mu, False, False) - plot_lambert(ax,l, sol = 0, color='r', legend=False, units = JR, N=500) - v_end_l = l.get_v2()[0] - v_beg_l = l.get_v1()[0] - plt.show() - return ax -mga_incipit.plot = _mga_incipit_plot - -#Plot of the trajectory of an mga_part problem -def _mga_part_plot(self,x): - """ - Plots the trajectory represented by the decision vector x - - Example:: - - prob.plot(x) - """ - import matplotlib as mpl - from mpl_toolkits.mplot3d import Axes3D - import matplotlib.pyplot as plt - from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler - from PyKEP import epoch, propagate_lagrangian, lambert_problem,fb_prop, AU, MU_SUN, DAY2SEC - from math import pi, acos, cos, sin - from scipy.linalg import norm - - mpl.rcParams['legend.fontsize'] = 10 - fig = plt.figure() - ax = fig.gca(projection='3d', aspect='equal') - ax.scatter(0,0,0, color='y') - - JR = 71492000.0 - legs = len(x)/4 - seq = self.get_sequence() - common_mu = seq[0].mu_central_body - start_mjd2000 = self.t0.mjd2000 - - #1 - we 'decode' the chromosome recording the various times of flight (days) in the list T - T = x[3::4] - - #2 - We compute the epochs and ephemerides of the planetary encounters - t_P = list([None] * (legs+1)) - r_P = list([None] * (legs+1)) - v_P = list([None] * (legs+1)) - - for i,planet in enumerate(seq): - t_P[i] = epoch(start_mjd2000+sum(T[:i])) - r_P[i],v_P[i] = planet.eph(t_P[i]) - plot_planet(ax, planet, t0=t_P[i], 
color=(0.8,0.6,0.8), legend=True, units = JR) - - v_end_l = [a+b for a,b in zip(v_P[0],self.vinf_in)] - #4 - And we iterate on the legs - for i in xrange(0,legs): - #Fly-by - v_out = fb_prop(v_end_l,v_P[i],x[1+4*i]*seq[i-1].radius,x[4*i],seq[i].mu_self) - #s/c propagation before the DSM - r,v = propagate_lagrangian(r_P[i],v_out,x[4*i+2]*T[i]*DAY2SEC,common_mu) - plot_kepler(ax,r_P[i],v_out,x[4*i+2]*T[i]*DAY2SEC,common_mu,N = 500, color='b', legend=False, units = JR) - #Lambert arc to reach Earth during (1-nu2)*T2 (second segment) - dt = (1-x[4*i+2])*T[i]*DAY2SEC - l = lambert_problem(r,r_P[i+1],dt,common_mu, False, False) - plot_lambert(ax,l, sol = 0, color='r', legend=False, units = JR, N=500) - v_end_l = l.get_v2()[0] - v_beg_l = l.get_v1()[0] - plt.show() - return ax +# Plot of the trajectory for an mga_1dsm problem +def _mga_1dsm_tof_plot_old(self, x): + """ + Plots the trajectory represented by the decision vector x + """ + import matplotlib as mpl + from mpl_toolkits.mplot3d import Axes3D + import matplotlib.pyplot as plt + from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler + from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC + from math import pi, acos, cos, sin + from scipy.linalg import norm + + mpl.rcParams['legend.fontsize'] = 10 + fig = plt.figure() + ax = fig.gca(projection='3d') + ax.scatter(0, 0, 0, color='y') + + seq = self.get_sequence() + + n = (len(seq) - 1) + # 1 - we 'decode' the chromosome recording the various times of flight + # (days) in the list T + T = x[5::4] + + # 2 - We compute the epochs and ephemerides of the planetary encounters + t_P = list([None] * (n + 1)) + r_P = list([None] * (n + 1)) + v_P = list([None] * (n + 1)) + DV = list([None] * (n + 1)) + + for i, planet in enumerate(seq): + t_P[i] = epoch(x[0] + sum(T[0:i])) + r_P[i], v_P[i] = planet.eph(t_P[i]) + plot_planet(ax, planet, t0=t_P[i], color=( + 0.8, 0.6, 0.8), legend=True, units = AU) + + # 3 - We start with the first leg + theta = 2 * pi * x[1] + phi = acos(2 * x[2] - 1) - pi / 2 + + Vinfx = x[3] * cos(phi) * cos(theta) + Vinfy = x[3] * cos(phi) * sin(theta) + Vinfz = x[3] * sin(phi) + + v0 = [a + b for a, b in zip(v_P[0], [Vinfx, Vinfy, Vinfz])] + r, v = propagate_lagrangian( + r_P[0], v0, x[4] * T[0] * DAY2SEC, seq[0].mu_central_body) + plot_kepler( + ax, + r_P[0], + v0, + x[4] * + T[0] * + DAY2SEC, + seq[0].mu_central_body, + N=100, + color='b', + legend=False, + units=AU) + + # Lambert arc to reach seq[1] + dt = (1 - x[4]) * T[0] * DAY2SEC + l = lambert_problem(r, r_P[1], dt, seq[0].mu_central_body) + plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU) + v_end_l = l.get_v2()[0] + v_beg_l = l.get_v1()[0] + + # First DSM occuring at time nu1*T1 + DV[0] = norm([a - b for a, b in zip(v_beg_l, v)]) + + # 4 - And we proceed with each successive leg + for i in range(1, n): + # Fly-by + v_out = fb_prop(v_end_l, + v_P[i], + x[7 + (i - 1) * 4] * seq[i].radius, + x[6 + (i - 1) * 4], + seq[i].mu_self) + # s/c propagation before the DSM + r, v = propagate_lagrangian( + r_P[i], v_out, x[8 + (i - 1) * 4] * T[i] * DAY2SEC, seq[0]. 
+            mu_central_body)
+        plot_kepler(ax,
+                    r_P[i],
+                    v_out,
+                    x[8 + (i - 1) * 4] * T[i] * DAY2SEC,
+                    seq[0].mu_central_body,
+                    N=100,
+                    color='b',
+                    legend=False,
+                    units=AU)
+        # Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
+        dt = (1 - x[8 + (i - 1) * 4]) * T[i] * DAY2SEC
+        l = lambert_problem(r, r_P[i + 1], dt, seq[0].mu_central_body)
+        plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU)
+        v_end_l = l.get_v2()[0]
+        v_beg_l = l.get_v1()[0]
+        # DSM occurring at time nu2*T2
+        DV[i] = norm([a - b for a, b in zip(v_beg_l, v)])
+    return ax
mga_1dsm_tof.plot_old = _mga_1dsm_tof_plot_old
+
+# Plot of the trajectory of an mga_incipit problem
+
+
+def _mga_incipit_plot_old(self, x, plot_leg_0=False):
+    """
+    Plots the trajectory represented by the decision vector x
+
+    Example::
+
+      prob.plot(x)
+    """
+    import matplotlib as mpl
+    from mpl_toolkits.mplot3d import Axes3D
+    import matplotlib.pyplot as plt
+    from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
+    from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
+    from math import pi, acos, cos, sin
+    from scipy.linalg import norm
+
+    mpl.rcParams['legend.fontsize'] = 10
+    fig = plt.figure()
+    ax = fig.gca(projection='3d', aspect='equal')
+    ax.scatter(0, 0, 0, color='y')
+
+    JR = 71492000.0
+    legs = len(x) // 4  # integer division: '/' would yield a float on Python 3
+    seq = self.get_sequence()
+    common_mu = seq[0].mu_central_body
+
+    # 1 - we 'decode' the chromosome recording the various times of flight
+    # (days) in the list T
+    T = x[3::4]
+
+    # 2 - We compute the epochs and ephemerides of the planetary encounters
+    t_P = list([None] * legs)
+    r_P = list([None] * legs)
+    v_P = list([None] * legs)
+    DV = list([None] * legs)
+
+    for i, planet in enumerate(seq):
+        t_P[i] = epoch(x[0] + sum(T[:i + 1]))
+        r_P[i], v_P[i] = planet.eph(t_P[i])
+        plot_planet(ax, planet, t0=t_P[i], color=(0.8, 0.6, 0.8),
+                    legend=True, units=JR)
+
+    # 3 - We start with the first leg: a lambert arc
+    theta = 2 * pi * x[1]
+    phi = acos(2 * x[2] - 1) - pi / 2
+    # phi close to zero is in the moon orbit plane injection
+    r = [cos(phi) * sin(theta), cos(phi) * cos(theta), sin(phi)]
+    r = [JR * 1000 * d for d in r]
+
+    l = lambert_problem(r, r_P[0], T[0] * DAY2SEC, common_mu, False, False)
+    if (plot_leg_0):
+        plot_lambert(ax, l, sol=0, color='k', legend=False, units=JR, N=500)
+
+    # Lambert arc to reach seq[1]
+    v_end_l = l.get_v2()[0]
+    v_beg_l = l.get_v1()[0]
+
+    # 4 - And we proceed with each successive leg
+    for i in range(1, legs):
+        # Fly-by
+        v_out = fb_prop(v_end_l,
+                        v_P[i - 1],
+                        x[1 + 4 * i] * seq[i - 1].radius,
+                        x[4 * i],
+                        seq[i - 1].mu_self)
+        # s/c propagation before the DSM
+        r, v = propagate_lagrangian(
+            r_P[i - 1], v_out, x[4 * i + 2] * T[i] * DAY2SEC, common_mu)
+        plot_kepler(ax,
+                    r_P[i - 1],
+                    v_out,
+                    x[4 * i + 2] * T[i] * DAY2SEC,
+                    common_mu,
+                    N=500,
+                    color='b',
+                    legend=False,
+                    units=JR)
+        # Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
+        dt = (1 - x[4 * i + 2]) * T[i] * DAY2SEC
+        l = lambert_problem(r, r_P[i], dt, common_mu, False, False)
+        plot_lambert(ax, l, sol=0, color='r', legend=False, units=JR, N=500)
+        v_end_l = l.get_v2()[0]
+        v_beg_l = l.get_v1()[0]
+    plt.show()
+    return ax
+mga_incipit.plot_old = _mga_incipit_plot_old
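The plotting helpers above and below all decode the same chromosome layout, [beta, rp, eta, T] repeated once per leg; a standalone sketch of the decoding (pure Python, hypothetical numbers):

    x = [0.1, 3.0, 0.5, 40.0,   # leg 1: beta, rp, eta, T
         0.2, 2.5, 0.6, 30.0]   # leg 2
    legs = len(x) // 4          # integer division, as in the fixed code above
    T = x[3::4]                 # per-leg times of flight (days)
    betas, rps, etas = x[0::4], x[1::4], x[2::4]
    assert len(T) == legs == 2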
+
+# Plot of the trajectory of an mga_part problem
+
+
+def _mga_part_plot_old(self, x):
+    """
+    Plots the trajectory represented by the decision vector x
+
+    Example::
+
+      prob.plot(x)
+    """
+    import matplotlib as mpl
+    from mpl_toolkits.mplot3d import Axes3D
+    import matplotlib.pyplot as plt
+    from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
+    from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
+    from math import pi, acos, cos, sin
+    from scipy.linalg import norm
+
+    mpl.rcParams['legend.fontsize'] = 10
+    fig = plt.figure()
+    ax = fig.gca(projection='3d', aspect='equal')
+    ax.scatter(0, 0, 0, color='y')
+
+    JR = 71492000.0
+    legs = len(x) // 4  # integer division: '/' would yield a float on Python 3
+    seq = self.get_sequence()
+    common_mu = seq[0].mu_central_body
+    start_mjd2000 = self.t0.mjd2000
+
+    # 1 - we 'decode' the chromosome recording the various times of flight
+    # (days) in the list T
+    T = x[3::4]
+
+    # 2 - We compute the epochs and ephemerides of the planetary encounters
+    t_P = list([None] * (legs + 1))
+    r_P = list([None] * (legs + 1))
+    v_P = list([None] * (legs + 1))
+
+    for i, planet in enumerate(seq):
+        t_P[i] = epoch(start_mjd2000 + sum(T[:i]))
+        r_P[i], v_P[i] = planet.eph(t_P[i])
+        plot_planet(ax, planet, t0=t_P[i], color=(0.8, 0.6, 0.8),
+                    legend=True, units=JR)
+
+    v_end_l = [a + b for a, b in zip(v_P[0], self.vinf_in)]
+    # 4 - And we iterate on the legs
+    for i in range(0, legs):
+        # Fly-by
+        v_out = fb_prop(v_end_l,
+                        v_P[i],
+                        x[1 + 4 * i] * seq[i - 1].radius,
+                        x[4 * i],
+                        seq[i].mu_self)
+        # s/c propagation before the DSM
+        r, v = propagate_lagrangian(
+            r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC, common_mu)
+        plot_kepler(ax, r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC,
+                    common_mu, N=500, color='b', legend=False, units=JR)
+        # Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
+        dt = (1 - x[4 * i + 2]) * T[i] * DAY2SEC
+        l = lambert_problem(r, r_P[i + 1], dt, common_mu, False, False)
+        plot_lambert(ax, l, sol=0, color='r', legend=False, units=JR, N=500)
+        v_end_l = l.get_v2()[0]
+        v_beg_l = l.get_v1()[0]
+    plt.show()
+    return ax
+mga_part.plot_old = _mga_part_plot_old
+
+# Plot of concatenated fly-by legs
+
+
+def _part_plot(x, units, ax, seq, start_mjd2000, vinf_in):
+    """
+    Plots the trajectory represented by a decision vector x = [beta,rp,eta,T] * N
+    associated to a sequence seq, a start_mjd2000 and an incoming vinf_in
+    """
+    from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
+    from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
+    from math import pi, acos, cos, sin
+    from scipy.linalg import norm
+
+    legs = len(x) // 4  # integer division: '/' would yield a float on Python 3
+    common_mu = seq[0].mu_central_body
+
+    # 1 - we 'decode' the chromosome recording the various times of flight
+    # (days) in the list T
+    T = x[3::4]
+
+    # 2 - We compute the epochs and ephemerides of the planetary encounters
+    t_P = list([None] * (legs + 1))
+    r_P = list([None] * (legs + 1))
+    v_P = list([None] * (legs + 1))
+
+    for i, planet in enumerate(seq):
+        t_P[i] = epoch(start_mjd2000 + sum(T[:i]))
+        r_P[i], v_P[i] = planet.eph(t_P[i])
+        plot_planet(ax, planet, t0=t_P[i], color=(0.8, 0.6, 0.8),
+                    legend=True, units=units)
+
+    v_end_l = [a + b for a, b in zip(v_P[0], vinf_in)]
+    # 4 - And we iterate on the legs
+    for i in range(0, legs):
+        # Fly-by
+        v_out = fb_prop(v_end_l,
+                        v_P[i],
+                        x[1 + 4 * i] * seq[i].radius,
+                        x[4 * i],
+                        seq[i].mu_self)
+        # s/c propagation before the DSM
+        r, v = propagate_lagrangian(
+            r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC, common_mu)
+        plot_kepler(ax, r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC,
+                    common_mu, N=500, color='b', legend=False, units=units)
+        # Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
+        dt = (1 - x[4 * i + 2]) * T[i] * DAY2SEC
+        l =
lambert_problem(r, r_P[i + 1], dt, common_mu, False, False) + plot_lambert(ax, l, sol=0, color='r', legend=False, units=units, N=500) + v_end_l = l.get_v2()[0] + v_beg_l = l.get_v1()[0] + +# Plot of the trajectory of an mga_part problem + + +def _mga_part_plot(self, x): + """ + Plots the trajectory represented by the decision vector x + + Example:: + + prob.plot(x) + """ + import matplotlib as mpl + from mpl_toolkits.mplot3d import Axes3D + import matplotlib.pyplot as plt + + mpl.rcParams['legend.fontsize'] = 10 + fig = plt.figure() + ax = fig.gca(projection='3d', aspect='equal') + + # Plots the central 'planet'star + ax.scatter(0, 0, 0, color='y') + + JR = 71492000.0 + seq = self.get_sequence() + start_mjd2000 = self.t0.mjd2000 + _part_plot(x, JR, ax, seq, start_mjd2000, self.vinf_in) + return ax mga_part.plot = _mga_part_plot +def _mga_incipit_plot(self, x, plot_leg_0=False): + """ + Plots the trajectory represented by the decision vector x + + Example:: + + prob.plot(x) + """ + import matplotlib as mpl + from mpl_toolkits.mplot3d import Axes3D + import matplotlib.pyplot as plt + from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler + from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC + from math import pi, acos, cos, sin + from scipy.linalg import norm + + mpl.rcParams['legend.fontsize'] = 10 + fig = plt.figure() + ax = fig.gca(projection='3d', aspect='equal') + ax.scatter(0, 0, 0, color='y') + + JR = 71492000.0 + seq = self.get_sequence() + common_mu = seq[0].mu_central_body + r_P, v_P = seq[0].eph(epoch(x[0] + x[3])) + + # 3 - We start with the first leg: a lambert arc + theta = 2 * pi * x[1] + phi = acos(2 * x[2] - 1) - pi / 2 + # phi close to zero is in the moon orbit plane injection + r = [cos(phi) * sin(theta), cos(phi) * cos(theta), sin(phi)] + r = [JR * 1000 * d for d in r] + + l = lambert_problem(r, r_P, x[3] * DAY2SEC, common_mu, False, False) + if (plot_leg_0): + plot_lambert(ax, l, sol=0, color='k', legend=False, units=JR, N=500) + + # Lambert arc to reach seq[1] + v_end_l = l.get_v2()[0] + vinf_in = [a - b for a, b in zip(v_end_l, v_P)] + _part_plot(x[4:], JR, ax, seq, x[0] + x[3], vinf_in) + + return ax +mga_incipit.plot = _mga_incipit_plot + +# Plot of the trajectory for an mga_1dsm problem + + +def _mga_1dsm_tof_plot(self, x): + """ + Plots the trajectory represented by the decision vector x + """ + import matplotlib as mpl + from mpl_toolkits.mplot3d import Axes3D + import matplotlib.pyplot as plt + from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler + from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC + from math import pi, acos, cos, sin + from scipy.linalg import norm + + mpl.rcParams['legend.fontsize'] = 10 + fig = plt.figure() + ax = fig.gca(projection='3d') + ax.scatter(0, 0, 0, color='y') + + seq = self.get_sequence() + + # 2 - We plot the first leg + r_P0, v_P0 = seq[0].eph(epoch(x[0])) + plot_planet(ax, seq[0], t0=epoch(x[0]), color=( + 0.8, 0.6, 0.8), legend=True, units = AU) + r_P1, v_P1 = seq[1].eph(epoch(x[0] + x[5])) + theta = 2 * pi * x[1] + phi = acos(2 * x[2] - 1) - pi / 2 + + Vinfx = x[3] * cos(phi) * cos(theta) + Vinfy = x[3] * cos(phi) * sin(theta) + Vinfz = x[3] * sin(phi) + + v0 = [a + b for a, b in zip(v_P0, [Vinfx, Vinfy, Vinfz])] + r, v = propagate_lagrangian( + r_P0, v0, x[4] * x[5] * DAY2SEC, seq[0].mu_central_body) + plot_kepler( + ax, + r_P0, + v0, + x[4] * + x[5] * + DAY2SEC, + seq[0].mu_central_body, + N=100, + color='b', + 
legend=False,
+        units=AU)
+
+    # Lambert arc to reach seq[1]
+    dt = (1 - x[4]) * x[5] * DAY2SEC
+    l = lambert_problem(r, r_P1, dt, seq[0].mu_central_body)
+    plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU)
+    v_end_l = l.get_v2()[0]
+
+    vinf_in = [a - b for a, b in zip(v_end_l, v_P1)]
+    _part_plot(x[6:], AU, ax, seq[1:], x[0] + x[5], vinf_in)
+    return ax
+mga_1dsm_tof.plot = _mga_1dsm_tof_plot
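A sketch of how these plot methods are typically driven (assuming a standard PyGMO 1.x workflow; the algorithm choice and population size are arbitrary):

    from PyGMO import algorithm, island, problem
    prob = problem.mga_1dsm_tof()
    isl = island(algorithm.jde(gen=100), prob, 20)
    isl.evolve(10)
    isl.join()
    prob.plot(isl.population.champion.x)  # the plot redefined above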
diff --git a/PyGMO/problem/_mit_spheres.py b/PyGMO/problem/_mit_spheres.py
index 9f6735f6..bd68682c 100644
--- a/PyGMO/problem/_mit_spheres.py
+++ b/PyGMO/problem/_mit_spheres.py
@@ -1,91 +1,110 @@
-def visualize(self,world_states):
-    """
-
-    """
-    # If Vpython is not detected this method throws an excpetion
-    try:
-        import visual as v
-    except ImportError:
-        print("error: No Visual Python Detected. This is needed for the visualization method")
-        raise
-    import numpy as np
-
-    # We convert the input data in a numpy array
-    world_states = np.array(world_states)
-
-    # Initializes the display and the initial sphere positions
-    scene = v.display(
-        title = "Spheres simulations",
-        width = 800,
-        height = 450,
-        x = 100, y = 0,
-        visible = True,
-        autocenter = True,
-        autoscale = False,
-        exit = True,
-    )
-
-    #Creates the spheres
-    bodies = [
-        v.sphere(
-            radius = 0.1,
-            color = v.color.white,
-        )
-        for _ in range(3)
-    ]
-
-    #Initialize spheres positionss
-    for sph_id, sph in enumerate(bodies):
-        sph.pos = world_states[ 0, sph_id*3 + 1 : (sph_id+1)*3 + 1]
-
-    #Creates the trails
-    trails = [v.curve(color=sc.color) for sc in bodies]
-    for i in range(3):
-        trails[i].append(pos=bodies[i].pos)
-
-    dr = [
-        np.sqrt((world_states[0,1] - world_states[0,4])**2 + (world_states[0,2] - world_states[0,5])**2 + (world_states[0,3] - world_states[0,6])**2),
-        np.sqrt((world_states[0,1] - world_states[0,7])**2 + (world_states[0,2] - world_states[0,8])**2 + (world_states[0,2] - world_states[0,9])**2),
-        np.sqrt((world_states[0,4] - world_states[0,7])**2 + (world_states[0,5] - world_states[0,8])**2 + (world_states[0,6] - world_states[0,9])**2)
-    ]
-
-    #Creates the labels
-    time_lbl = v.label(pos = (-9,-5,0), text = "t = " + str(world_states[0,0]))
-    r_lbl = v.label( pos = (-9,-3,0), text = "r12: " + str(dr[0]) + "\n" + "r13: " + str(dr[1]) + "\n" + "r23: "+ str(dr[2]) )
-
-    scene.autocenter = False
-
-    # pause before starting animations
-    v.rate(.5)
-
-    #
-    for i in xrange( 1, world_states.shape[0] ):
-
-        for sph_id, sph in enumerate(bodies):
-            sph.pos = world_states[ i, sph_id*3 + 1 : (sph_id+1)*3 + 1]
-        for j in range(3):
-            trails[j].append(pos=bodies[j].pos, retain=100)
-
-        dr = [
-            np.sqrt((world_states[i,1] - world_states[i,4])**2 + (world_states[i,2] - world_states[i,5])**2 + (world_states[i,3] - world_states[i,6])**2),
-            np.sqrt((world_states[i,1] - world_states[i,7])**2 + (world_states[i,2] - world_states[i,8])**2 + (world_states[i,3] - world_states[i,9])**2),
-            np.sqrt((world_states[i,4] - world_states[i,7])**2 + (world_states[i,5] - world_states[i,8])**2 + (world_states[i,6] - world_states[i,9])**2)
-        ]
-
-        time_lbl.text = "t = " + str(world_states[i,0])
-        r_lbl.text = "r12: " + str(dr[0]) + "\n" + "r13: " + str(dr[1]) + "\n" + "r23: "+ str(dr[2])
-        v.rate(50)
-
-    scene.visible = False
-
-    for b in bodies:
-        b.visible = False
-
-    for t in trails:
-        t.visible=False
-
-    time_lbl.visible = False
-    r_lbl.visible =False
-    #del time_lbl
-    #del r_lbl
-    #del scene
+def visualize(self, world_states):
+    """
+    Animates the recorded states of the three spheres using Visual Python.
+    """
+    # If Vpython is not detected this method throws an exception
+    try:
+        import visual as v
+    except ImportError:
+        print(
+            "error: No Visual Python Detected. This is needed for the visualization method")
+        raise
+    import numpy as np
+
+    # We convert the input data into a numpy array
+    world_states = np.array(world_states)
+
+    # Initializes the display and the initial sphere positions
+    scene = v.display(
+        title="Spheres simulations",
+        width=800,
+        height=450,
+        x=100, y=0,
+        visible=True,
+        autocenter=True,
+        autoscale=False,
+        exit=True,
+    )
+
+    # Creates the spheres
+    bodies = [
+        v.sphere(
+            radius=0.1,
+            color=v.color.white,
+        )
+        for _ in range(3)
+    ]
+
+    # Initialize sphere positions; each row of world_states is
+    # [t, x1, y1, z1, x2, y2, z2, x3, y3, z3]
+    for sph_id, sph in enumerate(bodies):
+        sph.pos = world_states[0, sph_id * 3 + 1: (sph_id + 1) * 3 + 1]
+
+    # Creates the trails
+    trails = [v.curve(color=sc.color) for sc in bodies]
+    for i in range(3):
+        trails[i].append(pos=bodies[i].pos)
+
+    # initial pairwise distances r12, r13, r23
+    dr = [
+        np.sqrt((world_states[0, 1] - world_states[0, 4]) ** 2 + (world_states[0, 2] -
+                world_states[0, 5]) ** 2 + (world_states[0, 3] - world_states[0, 6]) ** 2),
+        np.sqrt((world_states[0, 1] - world_states[0, 7]) ** 2 + (world_states[0, 2] -
+                world_states[0, 8]) ** 2 + (world_states[0, 3] - world_states[0, 9]) ** 2),
+        np.sqrt((world_states[0, 4] - world_states[0, 7]) ** 2 + (world_states[0, 5] -
+                world_states[0, 8]) ** 2 + (world_states[0, 6] - world_states[0, 9]) ** 2)
+    ]
+
+    # Creates the labels
+    time_lbl = v.label(
+        pos=(-9, -5, 0), text="t = " + str(world_states[0, 0]))
+    r_lbl = v.label(pos=(-9, -3, 0), text="r12: " + str(dr[0]) + "\n" +
+                    "r13: " + str(dr[1]) + "\n" + "r23: " + str(dr[2]))
+
+    scene.autocenter = False
+
+    # pause before starting animations
+    v.rate(.5)
+
+    # animate the remaining states
+    for i in range(1, world_states.shape[0]):
+
+        for sph_id, sph in enumerate(bodies):
+            sph.pos = world_states[i, sph_id * 3 + 1: (sph_id + 1) * 3 + 1]
+        for j in range(3):
+            trails[j].append(pos=bodies[j].pos, retain=100)
+
+        dr = [
+            np.sqrt((world_states[i, 1] - world_states[i, 4]) ** 2 + (world_states[i, 2] -
+                    world_states[i, 5]) ** 2 + (world_states[i, 3] - world_states[i, 6]) ** 2),
+            np.sqrt((world_states[i, 1] - world_states[i, 7]) ** 2 + (world_states[i, 2] -
+                    world_states[i, 8]) ** 2 + (world_states[i, 3] - world_states[i, 9]) ** 2),
+            np.sqrt((world_states[i, 4] - world_states[i, 7]) ** 2 + (world_states[i, 5] -
+                    world_states[i, 8]) ** 2 + (world_states[i, 6] - world_states[i, 9]) ** 2)
+        ]
+
+        time_lbl.text = "t = " + str(world_states[i, 0])
+        r_lbl.text = "r12: " + \
+            str(dr[0]) + "\n" + "r13: " + \
+            str(dr[1]) + "\n" + "r23: " + str(dr[2])
+        v.rate(50)
+
+    scene.visible = False
+
+    for b in bodies:
+        b.visible = False
+
+    for t in trails:
+        t.visible = False
+
+    time_lbl.visible = False
+    r_lbl.visible = False
+    #del time_lbl
+    #del r_lbl
+    #del scene
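Each row of world_states holds [t, x1, y1, z1, x2, y2, z2, x3, y3, z3]; with that layout the three pairwise distances can also be computed by slicing, which is less error-prone than spelling out the components (a numpy sketch of what dr computes):

    import numpy as np
    row = np.arange(10.0)                # stand-in for world_states[i]
    p = row[1:].reshape(3, 3)            # one position vector per sphere
    dr = [np.linalg.norm(p[0] - p[1]),   # r12
          np.linalg.norm(p[0] - p[2]),   # r13
          np.linalg.norm(p[1] - p[2])]   # r23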
diff --git a/PyGMO/problem/_mo.py b/PyGMO/problem/_mo.py
index 9efbd4a5..8d948ff5 100644
--- a/PyGMO/problem/_mo.py
+++ b/PyGMO/problem/_mo.py
@@ -1,142 +1,141 @@
-from _problem import zdt, dtlz
+from PyGMO.problem._problem import zdt, dtlz


-def _mo3d_plot(pop, a=40, comp=[0,1,2]):
-    """
-    Generic plot-method for multi-objective optimization problems with more then 2 objectives
-    USAGE: prob.plot(pop, comp[0,2,3])
-    * pop: population of solutions to the problem
-    * a: angle of view on which the 3d-plot is created
-    * comp: indexes the fitness dimension for x,y and z axis in that order
-    """
-    from mpl_toolkits.mplot3d import axes3d
-    import matplotlib.pyplot as plt
-    import numpy as np
+def _mo3d_plot(self, pop, a=40, comp=[0, 1, 2]):
+    """
+    Generic plot-method for multi-objective optimization problems with more than 2 objectives
-    fig = plt.figure()
-    ax = fig.add_subplot(111, projection='3d')
+    USAGE: prob.plot(pop, comp = [0,2,3])
+    * pop: population of solutions to the problem
+    * a: angle of view on which the 3d-plot is created
+    * comp: indexes the fitness dimension for x,y and z axis in that order
+    """
+    from mpl_toolkits.mplot3d import axes3d
+    import matplotlib.pyplot as plt
+    import numpy as np

-    fit = np.transpose([ind.cur_f for ind in pop])
-    try:
-        ax.plot(fit[comp[0]],fit[comp[1]],fit[comp[2]], 'ro')
-    except IndexError:
-        print 'Error. Please choose correct fitness dimensions for printing!'
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='3d')

-    ax.view_init(azim=a)
-    plt.show()
-    return ax
+    fit = np.transpose([ind.cur_f for ind in pop])
+    try:
+        ax.plot(fit[comp[0]], fit[comp[1]], fit[comp[2]], 'ro')
+    except IndexError:
+        print('Error. Please choose correct fitness dimensions for printing!')
+    ax.view_init(azim=a)
+    return ax

+def _dtlz234_plot(pop, a=40, comp=[0, 1, 2]):
+    """
+    Specific plot-method for the DTLZ2, DTLZ3 and DTLZ4 - plotting also the optimal pareto-front
-def _dtlz234_plot(pop, a=40, comp=[0,1,2]):
-    """
-    Specific plot-method for the DTLZ2, DTLZ3 and DTLZ4 - plotting also the optimal pareto-front
+    USAGE: prob.plot(pop, comp = [0,2,3])
-    USAGE: prob.plot(pop, comp[0,2,3])
+    * pop: population of solutions to the problem
-    * pop: population of solutions to the problem
+    * a: angle of view on which the 3d-plot is created
-    * a: angle of view on which the 3d-plot is created
+    * comp: indexes the fitness dimension for x,y and z axis in that order
+    """
-    * comp: indexes the fitness dimension for x,y and z axis in that order
-    """
+    from mpl_toolkits.mplot3d import axes3d
+    import matplotlib.pyplot as plt
+    import numpy as np
-    from mpl_toolkits.mplot3d import axes3d
-    import matplotlib.pyplot as plt
-    import numpy as np
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='3d')
-    fig = plt.figure()
-    ax = fig.add_subplot(111, projection='3d')
+    # plot the wireframe of the known optimal pareto front
+    thetas = np.linspace(0, (np.pi / 2.0), 30)
+    #gammas = np.linspace(-np.pi / 4, np.pi / 4, 30)
+    gammas = np.linspace(0, (np.pi / 2.0), 30)
-    # plot the wireframe of the known optimal pareto front
-    thetas = np.linspace(0, (np.pi / 2.0), 30)
-    #gammas = np.linspace(-np.pi / 4, np.pi / 4, 30)
-    gammas = np.linspace(0, (np.pi / 2.0), 30)
+    x_frame = np.outer(np.cos(thetas), np.cos(gammas))
+    y_frame = np.outer(np.cos(thetas), np.sin(gammas))
+    z_frame = np.outer(np.sin(thetas), np.ones(np.size(gammas)))
-    x_frame = np.outer(np.cos(thetas), np.cos(gammas))
-    y_frame = np.outer(np.cos(thetas), np.sin(gammas))
-    z_frame = np.outer(np.sin(thetas), np.ones(np.size(gammas)))
+    ax.view_init(azim=a)
-    ax.view_init(azim=a)
+    ax.set_autoscalex_on(False)
+    ax.set_autoscaley_on(False)
+    ax.set_autoscalez_on(False)
-    ax.set_autoscalex_on(False)
-    ax.set_autoscaley_on(False)
-    ax.set_autoscalez_on(False)
+    ax.set_xlim(0, 1.8)
+    ax.set_ylim(0, 1.8)
+    ax.set_zlim(0, 1.8)
-    ax.set_xlim(0, 1.8)
-    ax.set_ylim(0, 1.8)
-    ax.set_zlim(0, 1.8)
+    ax.plot_wireframe(x_frame, y_frame, z_frame)
-    ax.plot_wireframe(x_frame,y_frame,z_frame)
+    # plot the individuals of the population
+    fit = np.transpose([ind.cur_f for ind in pop])
+    try:
+        ax.plot(fit[comp[0]], fit[comp[1]], fit[comp[2]], 'ro')
+    except IndexError:
+        print('Error. Please choose correct fitness dimensions for printing!')
+    return ax
-    # plot the individuals of the population
-    fit = np.transpose([ind.cur_f for ind in pop])
-    try:
-        ax.plot(fit[comp[0]],fit[comp[1]],fit[comp[2]], 'ro')
-    except IndexError:
-        print 'Error. Please choose correct fitness dimensions for printing!'
-    plt.show()
-    return ax

-def _zdt_ctor(self, prob_id = 1, param_1 = None):
-    """
-    Constructs a multi-objective box-constrained problem from the ZDT testsuite
+
+def _zdt_ctor(self, prob_id=1, param_1=None):
+    """
+    Constructs a multi-objective box-constrained problem from the ZDT testsuite
-    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on, 2002
+    NOTE: K Deb, A Pratap, S Agarwal: A fast and elitist multiobjective genetic algorithm: NSGA-II, IEEE Transactions on Evolutionary Computation, 2002
-    USAGE: problem.zdt(prob_id = 1, param_1 = 30)
+    USAGE: problem.zdt(prob_id = 1, param_1 = 30)
-    * prob_id: Problem number, one of [1,2,...6]
-    * param_1: problem dimension for all ZDT problems except ZDT5 (here it is the number of binary strings used)
-    """
+    * prob_id: Problem number, one of [1,2,...6]
+    * param_1: problem dimension for all ZDT problems except ZDT5 (here it is the number of binary strings used)
+    """
-    arg_list=[]
-    arg_list.append(prob_id)
-    if param_1 == None:
-        if prob_id in [1,2,3,4]:
-            arg_list.append(30)
-        elif prob_id == 5:
-            arg_list.append(11)
-        else:
-            arg_list.append(10)
-    else:
-        arg_list.append(param_1)
-    self._orig_init(*arg_list)
+    arg_list = []
+    arg_list.append(prob_id)
+    if param_1 is None:
+        if prob_id in [1, 2, 3, 4]:
+            arg_list.append(30)
+        elif prob_id == 5:
+            arg_list.append(11)
+        else:
+            arg_list.append(10)
+    else:
+        arg_list.append(param_1)
+    self._orig_init(*arg_list)
zdt._orig_init = zdt.__init__
zdt.__init__ = _zdt_ctor
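The dimension defaults encoded in the branches above, in use (a sketch, assuming the PyGMO problem subpackage):

    from PyGMO import problem
    p1 = problem.zdt(1)                # 30 decision variables by default
    p5 = problem.zdt(5)                # ZDT5: defaults to 11 binary strings
    p2 = problem.zdt(2, param_1=20)    # explicit dimension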
-def _dtlz_ctor(self, prob_id = 1, k = None, fdim = 3, alpha = 100):
-    """
-    Constructs a multi-objective box-constrained problem from the DTLZ testsuite
-
-    NOTE: K Deb, L Thiele, M Laumanns, E Zitzler, Scalable test problems for evolutionary multiobjective optimization
-
-    USAGE: problem.dtlz(prob_id = 1, k = 20, fdim = 4)
-
-    * prob_id: Problem number, one of [1,2,...7]
-    * k: paramter defining integer dimension of the problem: k + fdim - 1
-    * fdim: number of objectives
-    * alpha: controls density of solutions (just used for prob_id = 4)
-    """
-
-    arg_list=[]
-    arg_list.append(prob_id)
-    if k == None:
-        if prob_id == 1:
-            arg_list.append(5)
-        elif prob_id in [2,3,4,5,6]:
-            arg_list.append(10)
-        else:
-            arg_list.append(20)
-    else:
-        arg_list.append(k)
-    arg_list.append(fdim)
-    arg_list.append(alpha)
-    self._orig_init(*arg_list)
-    if prob_id in [2,3,4]:
-        self.plot = _dtlz234_plot
+
+def _dtlz_ctor(self, prob_id=1, k=None, fdim=3, alpha=100):
+    """
+    Constructs a multi-objective box-constrained problem from the DTLZ testsuite
+
+    NOTE: K Deb, L Thiele, M Laumanns, E Zitzler, Scalable test problems for evolutionary multiobjective optimization
+
+    USAGE: problem.dtlz(prob_id = 1, k = 20, fdim = 4)
+
+    * prob_id: Problem number, one of [1,2,...7]
+    * k: parameter defining the integer dimension of the problem: k + fdim - 1
+    * fdim: number of objectives
+    * alpha: controls density of solutions (just used for prob_id = 4)
+    """
+
+    arg_list = []
+    arg_list.append(prob_id)
+    if k is None:
+        if prob_id == 1:
+            arg_list.append(5)
+        elif prob_id in [2, 3, 4, 5, 6]:
+            arg_list.append(10)
+        else:
+            arg_list.append(20)
+    else:
+        arg_list.append(k)
+    arg_list.append(fdim)
+    arg_list.append(alpha)
+
self._orig_init(*arg_list) + if prob_id in [2, 3, 4]: + self.plot = _dtlz234_plot dtlz.plot = _mo3d_plot dtlz._orig_init = dtlz.__init__ diff --git a/PyGMO/problem/_pl2pl.py b/PyGMO/problem/_pl2pl.py index 342f6b35..57daaa4e 100644 --- a/PyGMO/problem/_pl2pl.py +++ b/PyGMO/problem/_pl2pl.py @@ -1,128 +1,160 @@ -from _base import base - +from PyGMO.problem._base import base class py_pl2pl(base): - """ - This problem represents a low-thrust transfer between a departure planet (default is the earth) - and a target planet (default is mars). The spacecraft is described - by its starting mass (mass) its engine specific impulse (Isp) and its engine maximum thrust (Tmax). The - Sims-Flanagan model is used to describe a trajectory. A variable number of segments (nseg) can be used - An initial velocity with respect to the Earth is allowed (Vinf_0) assumed to be given by the launcher - A final velocity wrt the target planet is also allowed (Vinf_f) - The method high_fidelity allows to use a continuous thrust model rather than impulses - """ - - def __init__(self,mass=1000,Tmax=0.05,Isp=2500,Vinf_0=3,Vinf_f=1e-12,nseg=10,departure = None, target = None, optimise4mass = False): - """ - Constructs a low-thrust transfer between a departure planet and a target planet (Constrained Continuous Single-Objective) - - NOTE: An impulsive transcription is used to transform into an NLP the Optimal Control Problem - - USAGE: problem.py_pl2pl(self,mass=1000,Tmax=0.05,Isp=2500,Vinf_0=3,Vinf_f=0,nseg=10,departure = PyKEP.planet_ss('earth'), target = PyKEP.planet_ss('mars')) - - * mass: spacecraft mass at departure [kg] - * Tmax: maximum allowed thrust [N] - * Isp: spacecarft engine specific impulse [Isp] - * Vinf_0: allowed maximum starting velocity [km/s] - * Vinf_f: allowed maximum arrival velocity [km/s] - (if negative it is interpreted as a minimum arrival velocity) - * nseg: number of segments used for the impulsive transcription - * departure: departure planet (a PyKEP planet) - * target: arrival planet (a PyKEP planet) - """ - try: - import PyKEP - except ImportError: - raise ImportError("Error while trying 'import PyKEP': is PyKEP installed?") - if departure is None: - departure = PyKEP.planet_ss('earth') - if target is None: - target = mars = PyKEP.planet_ss('mars') - super(py_pl2pl,self).__init__(9 + nseg*3,0,1,9 + nseg,nseg+2,1e-5) - self.__departure = departure - self.__target = target - self.__sc = PyKEP.sims_flanagan.spacecraft(mass,Tmax,Isp) - self.__Vinf_0 = Vinf_0*1000 - self.__Vinf_f = Vinf_f*1000 - self.__leg = PyKEP.sims_flanagan.leg() - self.__leg.set_mu(PyKEP.MU_SUN) - self.__leg.set_spacecraft(self.__sc) - self.__nseg = nseg - self.set_bounds([0,10,self.__sc.mass/10,-abs(self.__Vinf_0),-abs(self.__Vinf_0),-abs(self.__Vinf_0),-abs(self.__Vinf_f),-abs(self.__Vinf_f),-abs(self.__Vinf_f)] + [-1] * 3 *nseg,[3000,1500,self.__sc.mass,abs(self.__Vinf_0),abs(self.__Vinf_0),abs(self.__Vinf_0),abs(self.__Vinf_f),abs(self.__Vinf_f),abs(self.__Vinf_f)] + [1] * 3 * nseg) - self.__optimise4mass = optimise4mass - def _objfun_impl(self,x): - if (self.__optimise4mass): - return (-x[2],) - else: - return(x[1],) - def _compute_constraints_impl(self,x): - import PyKEP - start = PyKEP.epoch(x[0]) - end = PyKEP.epoch(x[0] + x[1]) - - #Computing starting spaceraft state - r,v = self.__departure.eph(start) - v_list = list(v) - v_list[0] += x[3] - v_list[1] += x[4] - v_list[2] += x[5] - x0 = PyKEP.sims_flanagan.sc_state(r,v_list,self.__sc.mass) - - #Computing ending spaceraft state - r,v = self.__target.eph(end) - v_list 
= list(v) - v_list[0] += x[6] - v_list[1] += x[7] - v_list[2] += x[8] - xe = PyKEP.sims_flanagan.sc_state(r, v_list ,x[2]) - - #Building the SF leg - self.__leg.set(start,x0,x[-3 * self.__nseg:],end,xe) - - #Computing Vinf constraints (careful here, the weights do count). In case of a larger than constarint - #a factor of 100 has been added - if (self.__Vinf_0 >= 0): - v_inf_con_0 = (x[3] * x[3] + x[4] * x[4] + x[5] * x[5] - self.__Vinf_0 * self.__Vinf_0) / (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY) - else: - v_inf_con_0 = - 100 * (x[3] * x[3] + x[4] * x[4] + x[5] * x[5] - self.__Vinf_0 * self.__Vinf_0) / (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY) - if (self.__Vinf_f >= 0): - v_inf_con_f = (x[6] * x[6] + x[7] * x[7] + x[8] * x[8] - self.__Vinf_f * self.__Vinf_f) / (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY) - else: - v_inf_con_f = - 100 *(x[6] * x[6] + x[7] * x[7] + x[8] * x[8] - self.__Vinf_f * self.__Vinf_f) / (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY) - - #Setting all constraints - retval = list(self.__leg.mismatch_constraints() + self.__leg.throttles_constraints()) + [v_inf_con_0] + [v_inf_con_f] - retval[0] /= PyKEP.AU - retval[1] /= PyKEP.AU - retval[2] /= PyKEP.AU - retval[3] /= PyKEP.EARTH_VELOCITY - retval[4] /= PyKEP.EARTH_VELOCITY - retval[5] /= PyKEP.EARTH_VELOCITY - retval[6] /= self.__sc.mass - return retval - def pretty(self,x): - """Decodes the decision vector x""" - import PyKEP - start = PyKEP.epoch(x[0]) - end = PyKEP.epoch(x[0] + x[1]) - r,v = self.__departure.eph(start) - v_list = list(v) - v_list[0] += x[3] - v_list[1] += x[4] - v_list[2] += x[5] - x0 = PyKEP.sims_flanagan.sc_state(r,v_list,self.__sc.mass) - r,v = self.__target.eph(end) - xe = PyKEP.sims_flanagan.sc_state(r, v ,x[2]) - self.__leg.set(start,x0,x[-3 * self.__nseg:],end,xe) - print("A direct interplantary transfer\n") - print("FROM:") - print(self.__departure) - print("TO:") - print(self.__target) - print(self.__leg) - def get_hf(self): - return self.__leg.high_fidelity - def set_hf(self,state): - self.__leg.high_fidelity = state - high_fidelity = property(get_hf,set_hf) + + """ + This problem represents a low-thrust transfer between a departure planet (default is the earth) + and a target planet (default is mars). The spacecraft is described + by its starting mass (mass) its engine specific impulse (Isp) and its engine maximum thrust (Tmax). The + Sims-Flanagan model is used to describe a trajectory. 
A variable number of segments (nseg) can be used.
+    An initial velocity with respect to the Earth is allowed (Vinf_0), assumed to be provided by the launcher.
+    A final velocity with respect to the target planet is also allowed (Vinf_f).
+    The method high_fidelity allows the use of a continuous-thrust model rather than impulses.
+    """
+
+    def __init__(
+            self,
+            mass=1000,
+            Tmax=0.05,
+            Isp=2500,
+            Vinf_0=3,
+            Vinf_f=1e-12,
+            nseg=10,
+            departure=None,
+            target=None,
+            optimise4mass=False):
+        """
+        Constructs a low-thrust transfer between a departure planet and a target planet (Constrained Continuous Single-Objective)
+
+        NOTE: An impulsive transcription is used to transform the Optimal Control Problem into an NLP
+
+        USAGE: problem.py_pl2pl(mass=1000,Tmax=0.05,Isp=2500,Vinf_0=3,Vinf_f=0,nseg=10,departure = PyKEP.planet_ss('earth'), target = PyKEP.planet_ss('mars'))
+
+        * mass: spacecraft mass at departure [kg]
+        * Tmax: maximum allowed thrust [N]
+        * Isp: spacecraft engine specific impulse [s]
+        * Vinf_0: allowed maximum starting velocity [km/s]
+        * Vinf_f: allowed maximum arrival velocity [km/s]
+          (if negative it is interpreted as a minimum arrival velocity)
+        * nseg: number of segments used for the impulsive transcription
+        * departure: departure planet (a PyKEP planet)
+        * target: arrival planet (a PyKEP planet)
+        """
+        try:
+            import PyKEP
+        except ImportError:
+            raise ImportError(
+                "Error while trying 'import PyKEP': is PyKEP installed?")
+        if departure is None:
+            departure = PyKEP.planet_ss('earth')
+        if target is None:
+            target = PyKEP.planet_ss('mars')
+        super(py_pl2pl, self).__init__(
+            9 + nseg * 3, 0, 1, 9 + nseg, nseg + 2, 1e-5)
+        self.__departure = departure
+        self.__target = target
+        self.__sc = PyKEP.sims_flanagan.spacecraft(mass, Tmax, Isp)
+        self.__Vinf_0 = Vinf_0 * 1000
+        self.__Vinf_f = Vinf_f * 1000
+        self.__leg = PyKEP.sims_flanagan.leg()
+        self.__leg.set_mu(PyKEP.MU_SUN)
+        self.__leg.set_spacecraft(self.__sc)
+        self.__nseg = nseg
+        lb = [0, 10, self.__sc.mass / 10] + \
+            [-abs(self.__Vinf_0)] * 3 + [-abs(self.__Vinf_f)] * 3 + \
+            [-1] * 3 * nseg
+        ub = [3000, 1500, self.__sc.mass] + \
+            [abs(self.__Vinf_0)] * 3 + [abs(self.__Vinf_f)] * 3 + \
+            [1] * 3 * nseg
+        self.set_bounds(lb, ub)
+        self.__optimise4mass = optimise4mass
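For orientation, the 9 + 3*nseg decision variables bounded above pack the epochs, the final mass, the two v-infinity vectors and the per-segment throttles. A minimal sketch of how one might unpack such a vector; the helper name decode_x is hypothetical and not part of the patch:

def decode_x(x, nseg):
    # Layout follows the bounds set in py_pl2pl.__init__ above
    t0 = x[0]            # departure epoch [days, as passed to PyKEP.epoch]
    tof = x[1]           # time of flight [days]
    m_f = x[2]           # final spacecraft mass [kg]
    vinf_0 = x[3:6]      # departure v-infinity components [m/s]
    vinf_f = x[6:9]      # arrival v-infinity components [m/s]
    throttles = [x[9 + 3 * i:12 + 3 * i] for i in range(nseg)]
    return t0, tof, m_f, vinf_0, vinf_f, throttles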
+
+    def _objfun_impl(self, x):
+        if (self.__optimise4mass):
+            return (-x[2],)
+        else:
+            return (x[1],)
+
+    def _compute_constraints_impl(self, x):
+        import PyKEP
+        start = PyKEP.epoch(x[0])
+        end = PyKEP.epoch(x[0] + x[1])
+
+        # Computing the starting spacecraft state
+        r, v = self.__departure.eph(start)
+        v_list = list(v)
+        v_list[0] += x[3]
+        v_list[1] += x[4]
+        v_list[2] += x[5]
+        x0 = PyKEP.sims_flanagan.sc_state(r, v_list, self.__sc.mass)
+
+        # Computing the ending spacecraft state
+        r, v = self.__target.eph(end)
+        v_list = list(v)
+        v_list[0] += x[6]
+        v_list[1] += x[7]
+        v_list[2] += x[8]
+        xe = PyKEP.sims_flanagan.sc_state(r, v_list, x[2])
+
+        # Building the SF leg
+        self.__leg.set(start, x0, x[-3 * self.__nseg:], end, xe)
+
+        # Computing the Vinf constraints (careful here, the weights do count).
+        # For a larger-than constraint a factor of 100 has been added
+        if (self.__Vinf_0 >= 0):
+            v_inf_con_0 = (x[3] * x[3] + x[4] * x[4] + x[5] * x[5] -
+                           self.__Vinf_0 * self.__Vinf_0) / \
+                (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY)
+        else:
+            v_inf_con_0 = -100 * (x[3] * x[3] + x[4] * x[4] + x[5] * x[5] -
+                                  self.__Vinf_0 * self.__Vinf_0) / \
+                (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY)
+        if (self.__Vinf_f >= 0):
+            v_inf_con_f = (x[6] * x[6] + x[7] * x[7] + x[8] * x[8] -
+                           self.__Vinf_f * self.__Vinf_f) / \
+                (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY)
+        else:
+            v_inf_con_f = -100 * (x[6] * x[6] + x[7] * x[7] + x[8] * x[8] -
+                                  self.__Vinf_f * self.__Vinf_f) / \
+                (PyKEP.EARTH_VELOCITY * PyKEP.EARTH_VELOCITY)
+
+        # Setting all constraints
+        retval = list(self.__leg.mismatch_constraints() +
+                      self.__leg.throttles_constraints()) + \
+            [v_inf_con_0] + [v_inf_con_f]
+        retval[0] /= PyKEP.AU
+        retval[1] /= PyKEP.AU
+        retval[2] /= PyKEP.AU
+        retval[3] /= PyKEP.EARTH_VELOCITY
+        retval[4] /= PyKEP.EARTH_VELOCITY
+        retval[5] /= PyKEP.EARTH_VELOCITY
+        retval[6] /= self.__sc.mass
+        return retval
+
+    def pretty(self, x):
+        """Decodes the decision vector x"""
+        import PyKEP
+        start = PyKEP.epoch(x[0])
+        end = PyKEP.epoch(x[0] + x[1])
+        r, v = self.__departure.eph(start)
+        v_list = list(v)
+        v_list[0] += x[3]
+        v_list[1] += x[4]
+        v_list[2] += x[5]
+        x0 = PyKEP.sims_flanagan.sc_state(r, v_list, self.__sc.mass)
+        r, v = self.__target.eph(end)
+        xe = PyKEP.sims_flanagan.sc_state(r, v, x[2])
+        self.__leg.set(start, x0, x[-3 * self.__nseg:], end, xe)
+        print("A direct interplanetary transfer\n")
+        print("FROM:")
+        print(self.__departure)
+        print("TO:")
+        print(self.__target)
+        print(self.__leg)
+
+    def get_hf(self):
+        return self.__leg.high_fidelity
+
+    def set_hf(self, state):
+        self.__leg.high_fidelity = state
+    high_fidelity = property(get_hf, set_hf)
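Assuming PyKEP is available, a typical session with the class above might look as follows; the choice of algorithm.scipy_slsqp and the population size of 1 are illustrative assumptions, not something this module prescribes:

from PyGMO import algorithm, island, problem

prob = problem.py_pl2pl(mass=1000, Tmax=0.05, Isp=2500, nseg=10)
prob.high_fidelity = True  # continuous-thrust model instead of impulses
algo = algorithm.scipy_slsqp(max_iter=200)  # any NLP-capable local optimizer
isl = island(algo, prob, 1)
isl.evolve(1)
isl.join()
prob.pretty(isl.population.champion.x)  # decode and print the best trajectory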
diff --git a/PyGMO/problem/_spheres_q.py b/PyGMO/problem/_spheres_q.py
index 3bfc3151..50f72f33 100644
--- a/PyGMO/problem/_spheres_q.py
+++ b/PyGMO/problem/_spheres_q.py
@@ -1,88 +1,107 @@
-def visualize(self,world_states):
-    # If Vpython is not detected this method throws an excpetion
-    try:
-        import visual as v
-    except ImportError:
-        print("error: No Visual Python Detected. This is needed for the visualization method")
-        raise
-    import numpy as np
-
-    # We convert the input data in a numpy array
-    world_states = np.array(world_states)
-
-    # Initializes the display and the initial sphere positions
-    scene = v.display(
-        title = "Spheres simulations",
-        width = 800,
-        height = 450,
-        x = 100, y = 0,
-        visible = True,
-        autocenter = True,
-        autoscale = False,
-        exit = True,
-    )
-
-    #Creates the spheres
-    bodies = [
-        v.sphere(
-            radius = 0.1,
-            color = v.color.white,
-        )
-        for _ in range(3)
-    ]
-
-    #Initialize spheres positionss
-    for sph_id, sph in enumerate(bodies):
-        sph.pos = world_states[ 0, sph_id*3 + 1 : (sph_id+1)*3 + 1]
-
-    #Creates the trails
-    trails = [v.curve(color=sc.color) for sc in bodies]
-    for i in range(3):
-        trails[i].append(pos=bodies[i].pos)
-
-    dr = [
-        np.sqrt((world_states[0,1] - world_states[0,4])**2 + (world_states[0,2] - world_states[0,5])**2 + (world_states[0,3] - world_states[0,6])**2),
-        np.sqrt((world_states[0,1] - world_states[0,7])**2 + (world_states[0,2] - world_states[0,8])**2 + (world_states[0,2] - world_states[0,9])**2),
-        np.sqrt((world_states[0,4] - world_states[0,7])**2 + (world_states[0,5] - world_states[0,8])**2 + (world_states[0,6] - world_states[0,9])**2)
-    ]
-
-    #Creates the labels
-    time_lbl = v.label(pos = (-9,-5,0), text = "t = " + str(world_states[0,0]))
-    r_lbl = v.label( pos = (-9,-3,0), text = "r12: " + str(dr[0]) + "\n" + "r13: " + str(dr[1]) + "\n" + "r23: "+ str(dr[2]) )
-
-    scene.autocenter = False
-
-    # pause before starting animations
-    v.rate(.5)
-
-    #
-    for i in xrange( 1, world_states.shape[0] ):
-
-        for sph_id, sph in enumerate(bodies):
-            sph.pos = world_states[ i, sph_id*3 + 1 : (sph_id+1)*3 + 1]
-        for j in range(3):
-            trails[j].append(pos=bodies[j].pos, retain=100)
-
-        dr = [
-            np.sqrt((world_states[i,1] - world_states[i,4])**2 + (world_states[i,2] - world_states[i,5])**2 + (world_states[i,3] - world_states[i,6])**2),
-            np.sqrt((world_states[i,1] - world_states[i,7])**2 + (world_states[i,2] - world_states[i,8])**2 + (world_states[i,3] - world_states[i,9])**2),
-            np.sqrt((world_states[i,4] - world_states[i,7])**2 + (world_states[i,5] - world_states[i,8])**2 + (world_states[i,6] - world_states[i,9])**2)
-        ]
-
-        time_lbl.text = "t = " + str(world_states[i,0])
-        r_lbl.text = "r12: " + str(dr[0]) + "\n" + "r13: " + str(dr[1]) + "\n" + "r23: "+ str(dr[2])
-        v.rate(50)
-
-    scene.visible = False
-
-    for b in bodies:
-        b.visible = False
-
-    for t in trails:
-        t.visible=False
-
-    time_lbl.visible = False
-    r_lbl.visible =False
-    #del time_lbl
-    #del r_lbl
-    #del scene
+def visualize(self, world_states):
+    # If VPython is not detected, this method raises an exception
+    try:
+        import visual as v
+    except ImportError:
+        print(
+            "error: No Visual Python Detected. This is needed for the visualization method")
+        raise
+    import numpy as np
+
+    # We convert the input data into a numpy array
+    world_states = np.array(world_states)
+
+    # Initializes the display and the initial sphere positions
+    scene = v.display(
+        title="Spheres simulations",
+        width=800,
+        height=450,
+        x=100, y=0,
+        visible=True,
+        autocenter=True,
+        autoscale=False,
+        exit=True,
+    )
+
+    # Creates the spheres
+    bodies = [
+        v.sphere(
+            radius=0.1,
+            color=v.color.white,
+        )
+        for _ in range(3)
+    ]
+
+    # Initialize the sphere positions
+    for sph_id, sph in enumerate(bodies):
+        sph.pos = world_states[0, sph_id * 3 + 1: (sph_id + 1) * 3 + 1]
+
+    # Creates the trails
+    trails = [v.curve(color=sc.color) for sc in bodies]
+    for i in range(3):
+        trails[i].append(pos=bodies[i].pos)
+
+    dr = [
+        np.sqrt((world_states[0, 1] - world_states[0, 4]) ** 2 + (world_states[0, 2] -
+                world_states[0, 5]) ** 2 + (world_states[0, 3] - world_states[0, 6]) ** 2),
+        np.sqrt((world_states[0, 1] - world_states[0, 7]) ** 2 + (world_states[0, 2] -
+                world_states[0, 8]) ** 2 + (world_states[0, 3] - world_states[0, 9]) ** 2),
+        np.sqrt((world_states[0, 4] - world_states[0, 7]) ** 2 + (world_states[0, 5] -
+                world_states[0, 8]) ** 2 + (world_states[0, 6] - world_states[0, 9]) ** 2)
+    ]
+
+    # Creates the labels
+    time_lbl = v.label(pos=(-9, -5, 0), text="t = " + str(world_states[0, 0]))
+    r_lbl = v.label(
+        pos=(-9, -3, 0),
+        text="r12: " + str(dr[0]) + "\n" +
+             "r13: " + str(dr[1]) + "\n" +
+             "r23: " + str(dr[2]))
+
+    scene.autocenter = False
+
+    # pause before starting the animation
+    v.rate(.5)
+
+    # Animation loop
+    for i in range(1, world_states.shape[0]):
+
+        for sph_id, sph in enumerate(bodies):
+            sph.pos = world_states[i, sph_id * 3 + 1: (sph_id + 1) * 3 + 1]
+        for j in range(3):
+            trails[j].append(pos=bodies[j].pos, retain=100)
+
+        dr = [
+            np.sqrt((world_states[i, 1] - world_states[i, 4]) ** 2 + (world_states[i, 2] -
+                    world_states[i, 5]) ** 2 + (world_states[i, 3] - world_states[i, 6]) ** 2),
+            np.sqrt((world_states[i, 1] - world_states[i, 7]) ** 2 + (world_states[i, 2] -
+                    world_states[i, 8]) ** 2 + (world_states[i, 3] - world_states[i, 9]) ** 2),
+            np.sqrt((world_states[i, 4] - world_states[i, 7]) ** 2 + (world_states[i, 5] -
+                    world_states[i, 8]) ** 2 + (world_states[i, 6] - world_states[i, 9]) ** 2)
+        ]
+
+        time_lbl.text = "t = " + str(world_states[i, 0])
+        r_lbl.text = "r12: " + \
+            str(dr[0]) + "\n" + "r13: " + \
+            str(dr[1]) + "\n" + "r23: " + str(dr[2])
+        v.rate(50)
+
+    scene.visible = False
+
+    for b in bodies:
+        b.visible = False
+
+    for t in trails:
+        t.visible = False
+
+    time_lbl.visible = False
+    r_lbl.visible = False
+    #del time_lbl
+    #del r_lbl
+    #del scene
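The three separations r12, r13 and r23 above are spelled out term by term (the initial r13 term previously mixed up a column index, corrected here). For reference, an equivalent vectorized form is sketched below; the helper name pairwise_distances is hypothetical and not part of the module:

import numpy as np

def pairwise_distances(row):
    # row is one world_states row: [t, x1, y1, z1, x2, y2, z2, x3, y3, z3]
    pos = np.asarray(row[1:10]).reshape(3, 3)  # one row per sphere
    return [np.linalg.norm(pos[a] - pos[b])
            for a, b in ((0, 1), (0, 2), (1, 2))]

# dr = pairwise_distances(world_states[i]) reproduces [r12, r13, r23]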
diff --git a/PyGMO/problem/problem.cpp b/PyGMO/problem/problem.cpp
index 6dfdb826..c2684112 100644
--- a/PyGMO/problem/problem.cpp
+++ b/PyGMO/problem/problem.cpp
@@ -67,6 +67,13 @@ std::vector<std::vector<double> > get_rotation_matrix_from_eigen(const problem::rotated &p)
 	return retval;
 }
 
+// Wrapper for the decompose method's fitness computation.
+static inline fitness_vector compute_decomposed_fitness_wrapper(const problem::decompose &p, const fitness_vector &original_fit, const fitness_vector &weights) {
+	fitness_vector retval(1);
+	p.compute_decomposed_fitness(retval,original_fit,weights);
+	return retval;
+}
+
 // Wrapper to expose problems.
 template <class Problem>
 static inline class_<Problem,bases<problem::base> > problem_wrapper(const char *name, const char *descr)
@@ -111,7 +118,6 @@ static inline class_<Problem,bases<problem::base> > meta_problem_wrapper(const char *name, const char *descr)
 	return retval;
 }
 
-
 // Wrapper to expose unconstrained multi-objective problems.
 template <class Problem>
 static inline class_<Problem,bases<problem::base> > unc_mo_problem_wrapper(const char *name, const char *descr)
@@ -200,6 +206,8 @@ BOOST_PYTHON_MODULE(_problem) {
 		.add_property("best_x",make_function(&problem::base::get_best_x,return_value_policy<copy_const_reference>()), best_x_setter(&problem::base::set_best_x), "Best known decision vector(s).")
 		.add_property("best_f",make_function(&problem::base::get_best_f,return_value_policy<copy_const_reference>()),"Best known fitness vector(s).")
 		.add_property("best_c",make_function(&problem::base::get_best_c,return_value_policy<copy_const_reference>()),"Best known constraints vector(s).")
+		.add_property("fevals",&problem::base::get_fevals,"Number of function evaluations.")
+		.add_property("cevals",&problem::base::get_cevals,"Number of constraints evaluations.")
 		.def_pickle(python_class_pickle_suite<problem::base>());
 
 	// Expose base stochastic problem class, including the virtual methods. Here we explicitly
@@ -412,14 +420,20 @@ BOOST_PYTHON_MODULE(_problem) {
 		.def(init<const problem::base &, const std::vector<double> &>())
 		.def(init<const problem::base &>())
 		.add_property("shift_vector",make_function(&problem::shifted::get_shift_vector,return_value_policy<copy_const_reference>()))
-		.add_property("deshift",&problem::shifted::deshift);
+		.def("deshift",&problem::shifted::deshift);
+
+	// Scaled meta-problem
+	meta_problem_wrapper<problem::scaled>("scaled","Scaled problem")
+		.def(init<const problem::base &, const fitness_vector &>())
+		.add_property("units",make_function(&problem::scaled::get_units,return_value_policy<copy_const_reference>()))
+		.def("descale",&problem::scaled::descale);
 
 	// Rotated meta-problem
 	meta_problem_wrapper<problem::rotated>("rotated","Rotated problem")
 		.def(init<const problem::base &>())
 		.def(init<const problem::base &, const Eigen::MatrixXd &>())
 		.add_property("rotation_matrix",&get_rotation_matrix_from_eigen)
-		.add_property("derotate",&problem::rotated::derotate);
+		.def("derotate",&problem::rotated::derotate);
 
 	// Normalized meta-problem
 	meta_problem_wrapper<problem::normalized>("normalized","Normalized problem")
@@ -433,8 +447,15 @@ BOOST_PYTHON_MODULE(_problem) {
 		.value("TCHEBYCHEFF", problem::decompose::TCHEBYCHEFF)
 		.value("BI", problem::decompose::BI);
 	meta_problem_wrapper<problem::decompose>("decompose","Decomposed problem")
-		.def(init<const problem::base &, optional<problem::decompose::method_type, const std::vector<double> &, const std::vector<double> &> >())
-		.add_property("weights", make_function(&problem::decompose::get_weights, return_value_policy<copy_const_reference>()));
+		.def(init<const problem::base &, optional<problem::decompose::method_type, const std::vector<double> &, const std::vector<double> &, const bool> >())
+		.def("compute_decomposed_fitness", &compute_decomposed_fitness_wrapper,
+			"Computes the fitness of the decomposed problem\n\n"
+			"  USAGE:: w = prob.compute_decomposed_fitness(fit,weight)\n"
+			"  - fit: multi-dimensional fitness\n"
+			"  - weight: decomposition weights")
+		.add_property("weights", make_function(&problem::decompose::get_weights, return_value_policy<copy_const_reference>()))
+		.add_property("ideal_point", &problem::decompose::get_ideal_point, &problem::decompose::set_ideal_point,"The reference point (z) used to compute the Tchebycheff and BI decompositions.");
 
 	// Noisy meta-problem
 	// Exposing enums of problem::noisy
@@ -452,6 +473,13 @@ BOOST_PYTHON_MODULE(_problem) {
 		.def(init<const problem::base &, const double>())
 		.add_property("rho", &problem::robust::get_rho);
 
+	// Quadrature encoding problem
+	problem_wrapper<problem::quadrature_encoding>("quadrature_encoding", "Quadrature encoding problem")
+		.def(init<const std::vector<double> &>())
+		.add_property("transform2old", &problem::quadrature_encoding::transform2old)
+		.add_property("transform2new", &problem::quadrature_encoding::transform2new);
+
 #ifdef PAGMO_ENABLE_KEP_TOOLBOX
 	// Asteroid Sample Return (also used for human missions to asteroids)
 	// problem_wrapper<problem::sample_return>("sample_return","Asteroid sample return problem.")
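From the Python side, the new decompose bindings can then be exercised roughly as follows. The method name and argument order come from the USAGE string above; constructing problem.decompose with a single argument and the particular weight values are assumptions made for the sake of the sketch:

from PyGMO import population, problem

orig = problem.zdt(1)               # any multi-objective test problem
dprob = problem.decompose(orig)     # method/weights assumed to default sensibly
fit = population(orig, 1)[0].cur_f  # a multi-dimensional fitness vector
weights = [0.3, 0.7]                # illustrative weights; zdt(1) has 2 objectives
print(dprob.compute_decomposed_fitness(fit, weights))
print(dprob.weights, dprob.ideal_point)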
diff --git a/PyGMO/problem/python_base.h b/PyGMO/problem/python_base.h
index bf52e33f..2733e0e3 100644
--- a/PyGMO/problem/python_base.h
+++ b/PyGMO/problem/python_base.h
@@ -241,6 +241,6 @@ inline void load_construct_data(Archive &ar, pagmo::problem::python_base *prob,
 
 }} // namespaces
 
-BOOST_CLASS_EXPORT(pagmo::problem::python_base);
+BOOST_CLASS_EXPORT(pagmo::problem::python_base)
 
 #endif
diff --git a/PyGMO/problem/python_base_stochastic.h b/PyGMO/problem/python_base_stochastic.h
index 2825d1b1..df75b6c1 100644
--- a/PyGMO/problem/python_base_stochastic.h
+++ b/PyGMO/problem/python_base_stochastic.h
@@ -235,6 +235,6 @@ inline void load_construct_data(Archive &ar, pagmo::problem::python_base_stochastic *prob,
 
 }} // namespaces
 
-BOOST_CLASS_EXPORT(pagmo::problem::python_base_stochastic);
+BOOST_CLASS_EXPORT(pagmo::problem::python_base_stochastic)
 
 #endif
diff --git a/PyGMO/test/__init__.py b/PyGMO/test/__init__.py
index e726d471..e87bd00b 100644
--- a/PyGMO/test/__init__.py
+++ b/PyGMO/test/__init__.py
@@ -23,93 +23,108 @@
 import unittest as _ut
 
+
 class _serialization_test(_ut.TestCase):
-    def test_pickle(self):
-        from PyGMO import archipelago, island_list, problem_list, algorithm_list, problem
-        import pickle
-        from copy import deepcopy
-        # We remove some problems that cannot be constructed without external txt data files
-        prob_list = deepcopy(problem_list)
-        prob_list.remove(problem.cec2013)
-        print('')
-        for isl in island_list:
-            for prob in prob_list:
-                for algo in algorithm_list:
-                    print(isl,type(prob()),type(algo()))
-                    a = archipelago()
-                    a.push_back(isl(algo(),prob(),20))
-                    a.push_back(isl(algo(),prob(),20))
-                    pickle.loads(pickle.dumps(a))
-
-# This class will stress the island and archipelago classes with highly concurrent simple evolutions.
+
+    def test_pickle(self):
+        from PyGMO import archipelago, island_list, problem_list, algorithm_list, problem
+        import pickle
+        from copy import deepcopy
+        # We remove some problems that cannot be constructed without external
+        # txt data files
+        prob_list = deepcopy(problem_list)
+        prob_list.remove(problem.cec2013)
+        print('')
+        for isl in island_list:
+            for prob in prob_list:
+                for algo in algorithm_list:
+                    print(isl, type(prob()), type(algo()))
+                    a = archipelago()
+                    a.push_back(isl(algo(), prob(), 20))
+                    a.push_back(isl(algo(), prob(), 20))
+                    pickle.loads(pickle.dumps(a))
+
+# This class will stress the island and archipelago classes with highly
+# concurrent simple evolutions.
+
+
 class _island_torture_test(_ut.TestCase):
-    def __test_impl(self,isl_type,algo,prob):
-        from PyGMO import archipelago, topology
-        a = archipelago(topology = topology.ring())
-        for i in range(0,100):
-            a.push_back(isl_type(algo,prob,6))
-        a.evolve(10)
-        a.join()
-    def test_local_island(self):
-        from PyGMO import local_island, algorithm, problem
-        isl_type = local_island
-        algo_list = [algorithm.py_example(1), algorithm.de(5)]
-        prob_list = [problem.py_example(), problem.dejong(1)]
-        for algo in algo_list:
-            for prob in prob_list:
-                self.__test_impl(isl_type,algo,prob)
-    def test_py_island(self):
-        from PyGMO import py_island, algorithm, problem
-        isl_type = py_island
-        algo_list = [algorithm.py_example(1), algorithm.de(5)]
-        prob_list = [problem.py_example(), problem.dejong(1)]
-        for algo in algo_list:
-            for prob in prob_list:
-                self.__test_impl(isl_type,algo,prob)
-    def test_ipy_island(self):
-        from PyGMO import ipy_island, algorithm, problem
-        try:
-            from IPython.kernel import client
-            mec = client.MultiEngineClient()
-            if len(mec) == 0:
-                raise RuntimeError()
-        except ImportError as ie:
-            return
-        except BaseException as e:
-            print('\nThere is a problem with parallel IPython setup. 
The error message is:') - print(e) - print('Tests for ipy_island will not be run.') - return - isl_type = ipy_island - algo_list = [algorithm.py_example(1), algorithm.de(5)] - prob_list = [problem.py_example(), problem.dejong(1)] - for algo in algo_list: - for prob in prob_list: - self.__test_impl(isl_type,algo,prob) + + def __test_impl(self, isl_type, algo, prob): + from PyGMO import archipelago, topology + a = archipelago(topology=topology.ring()) + for i in range(0, 100): + a.push_back(isl_type(algo, prob, 6)) + a.evolve(10) + a.join() + + def test_local_island(self): + from PyGMO import local_island, algorithm, problem + isl_type = local_island + algo_list = [algorithm.py_example(1), algorithm.de(5)] + prob_list = [problem.py_example(), problem.dejong(1)] + for algo in algo_list: + for prob in prob_list: + self.__test_impl(isl_type, algo, prob) + + def test_py_island(self): + from PyGMO import py_island, algorithm, problem + isl_type = py_island + algo_list = [algorithm.py_example(1), algorithm.de(5)] + prob_list = [problem.py_example(), problem.dejong(1)] + for algo in algo_list: + for prob in prob_list: + self.__test_impl(isl_type, algo, prob) + + def test_ipy_island(self): + from PyGMO import ipy_island, algorithm, problem + try: + from IPython.kernel import client + mec = client.MultiEngineClient() + if len(mec) == 0: + raise RuntimeError() + except ImportError as ie: + return + except BaseException as e: + print( + '\nThere is a problem with parallel IPython setup. The error message is:') + print(e) + print('Tests for ipy_island will not be run.') + return + isl_type = ipy_island + algo_list = [algorithm.py_example(1), algorithm.de(5)] + prob_list = [problem.py_example(), problem.dejong(1)] + for algo in algo_list: + for prob in prob_list: + self.__test_impl(isl_type, algo, prob) + def run_serialization_test_suite(): - """Run the serialization test suite.""" - from PyGMO import test - suite = _ut.TestLoader().loadTestsFromTestCase(_serialization_test) - _ut.TextTestRunner(verbosity = 2).run(suite) + """Run the serialization test suite.""" + from PyGMO import test + suite = _ut.TestLoader().loadTestsFromTestCase(_serialization_test) + _ut.TextTestRunner(verbosity=2).run(suite) + def run_island_torture_test_suite(): - """Run the island torture test suite.""" - from PyGMO import test - suite = _ut.TestLoader().loadTestsFromTestCase(_island_torture_test) - _ut.TextTestRunner(verbosity = 2).run(suite) + """Run the island torture test suite.""" + from PyGMO import test + suite = _ut.TestLoader().loadTestsFromTestCase(_island_torture_test) + _ut.TextTestRunner(verbosity=2).run(suite) + def run_full_test_suite(): - """Run the complete test suite for PyGMO.""" - from PyGMO import test - from _hypervolume_tests import get_hv_suite - suite = _ut.TestLoader().loadTestsFromModule(test) - # Add hypervolume suite explicitly - suite.addTests(get_hv_suite()) + """Run the complete test suite for PyGMO.""" + from PyGMO import test + from PyGMO.test._hypervolume_tests import get_hv_suite + suite = _ut.TestLoader().loadTestsFromModule(test) + # Add hypervolume suite explicitly + suite.addTests(get_hv_suite()) + + _ut.TextTestRunner(verbosity=2).run(suite) - _ut.TextTestRunner(verbosity = 2).run(suite) def run_hv_test_suite(): - """Run the hypervolume test suite.""" - from _hypervolume_tests import get_hv_suite - _ut.TextTestRunner(verbosity = 2).run(get_hv_suite()) + """Run the hypervolume test suite.""" + from PyGMO.test._hypervolume_tests import get_hv_suite + 
_ut.TextTestRunner(verbosity=2).run(get_hv_suite()) diff --git a/PyGMO/test/_hypervolume_tests.py b/PyGMO/test/_hypervolume_tests.py index 1fd26cf5..22d4cf74 100644 --- a/PyGMO/test/_hypervolume_tests.py +++ b/PyGMO/test/_hypervolume_tests.py @@ -1,392 +1,569 @@ -from PyGMO import * -from PyGMO.util import * +from PyGMO import population, problem +from PyGMO.util import hypervolume, hv_algorithm import unittest + class HVCtorTest(unittest.TestCase): - def setUp(self): - self.good_ps_2d = [[5,2],[3,4],[1,5]] - - def test_dimension_size(self): - # fixed points dimensions should be reasonable - self.assertRaises(ValueError, hypervolume, [[1,], [2,]]) # points of f_dim = 1 - self.assertRaises(ValueError, hypervolume, [[],[],]) # points of f_dim = 0 - self.assertRaises(ValueError, hypervolume, [[],]) # empty set of points - self.assertRaises(ValueError, hypervolume, [[1,2,3], [3,3]]) # point dimensions are not equal - self.assertRaises(TypeError, hypervolume, []) # empty set of points #2 - - def test_pop_ctor(self): - # constructs the hypervolume object from a population object, expects to not raise any error - prob = problem.zdt(2) - pop = population(prob, 100) - - # construction from a population object - hypervolume(pop) - - # setting verification flag to False - hypervolume(pop, False) - - # setting verification flag to True - hypervolume(pop, True) - - def test_hypervolume_ctor_points(self): - # test various possibilities of construction - self.assertRaises(TypeError, hypervolume, "A string") # something other than a list or a string - self.assertRaises(TypeError, hypervolume, [[1,2,3], [2,3,4], [2,3,5]], "extra arg") # good point definition with extra arg (hypervolume(ps, "something extra") - self.assertRaises(TypeError, hypervolume, [[1,2,3], [2,3,4], "foo"]) # bad point - self.assertRaises(TypeError, hypervolume, [[1,2,3], [2,3,4], [2,3,"bar"]]) # bad point value - self.assertRaises(TypeError, hypervolume) # skipping argument: hypervolume() should raise TypeError - - self.assertRaises(TypeError, hypervolume, foo=self.good_ps_2d) # bad kwarg - self.assertRaises(TypeError, hypervolume, self.good_ps_2d, foo="bar") # extra kwarg + def setUp(self): + self.good_ps_2d = [[5, 2], [3, 4], [1, 5]] + + def test_dimension_size(self): + # fixed points dimensions should be reasonable + # points of f_dim = 1 + self.assertRaises(ValueError, hypervolume, [[1, ], [2, ]]) + # points of f_dim = 0 + self.assertRaises(ValueError, hypervolume, [[], [], ]) + # empty set of points + self.assertRaises(ValueError, hypervolume, [[], ]) + # point dimensions are not equal + self.assertRaises(ValueError, hypervolume, [[1, 2, 3], [3, 3]]) + self.assertRaises(TypeError, hypervolume, []) # empty set of points #2 + + def test_pop_ctor(self): + # constructs the hypervolume object from a population object, expects + # to not raise any error + prob = problem.zdt(2) + pop = population(prob, 100) + + # construction from a population object + hypervolume(pop) + + # setting verification flag to False + hypervolume(pop, False) + + # setting verification flag to True + hypervolume(pop, True) + + def test_hypervolume_ctor_points(self): + # test various possibilities of construction + # something other than a list or a string + self.assertRaises(TypeError, hypervolume, "A string") + # good point definition with extra arg (hypervolume(ps, "something + # extra") + self.assertRaises( + TypeError, hypervolume, [[1, 2, 3], [2, 3, 4], [2, 3, 5]], + "extra arg") + self.assertRaises( + TypeError, hypervolume, [[1, 2, 3], [2, 3, 4], "foo"]) 
# bad point + # bad point value + self.assertRaises( + TypeError, hypervolume, [[1, 2, 3], [2, 3, 4], [2, 3, "bar"]]) + # skipping argument: hypervolume() should raise TypeError + self.assertRaises(TypeError, hypervolume) + + self.assertRaises( + TypeError, hypervolume, foo=self.good_ps_2d) # bad kwarg + self.assertRaises( + TypeError, hypervolume, self.good_ps_2d, foo="bar") # extra kwarg + class HVFlagsTest(unittest.TestCase): - def test_gettersetter(self): - hv = hypervolume([[1,2,3],[4,5,6]]) - self.assertTrue(hv.get_verify() == True) - self.assertTrue(hv.get_copy_points() == True) - hv.set_verify(False) - hv.set_copy_points(False) - self.assertTrue(hv.get_verify() == False) - self.assertTrue(hv.get_copy_points() == False) + def test_gettersetter(self): + hv = hypervolume([[1, 2, 3], [4, 5, 6]]) + self.assertTrue(hv.get_verify()) + self.assertTrue(hv.get_copy_points()) + hv.set_verify(False) + hv.set_copy_points(False) + self.assertTrue(hv.get_verify() is False) + self.assertTrue(hv.get_copy_points() is False) + class HVComputeTest(unittest.TestCase): - def setUp(self): - self.hv2d = hypervolume([[3,1],[2,2],[1,3]]) - - def test_correct_out(self): - # simple 3D test - hv = hypervolume([[1,1,1],[2,2,2,]]) - self.assertEqual(hv.compute(r = [3,3,3]), 8) - - # simple 2D test - hv = hypervolume([[1,2],[2,1]]) - self.assertEqual(hv.compute(r = [3,3]), 3) - - # point on the border of refpoint (2D) - hv = hypervolume([[1,2],[2,1]]) - self.assertEqual(hv.compute([2,2]), 0) - - # points on the border of refpoint (3D) - hv = hypervolume([[1,2,1],[2,1,1]]) - self.assertEqual(hv.compute([2,2,2]), 0) - - def test4d_dominated(self): - hv = hypervolume([[1,1,1,1],[2,2,2,2]]) - self.assertEqual(hv.compute(r = (3,3,3,3), algorithm=hv_algorithm.hv4d()), 16) - - def test4d_edge(self): - hv = hypervolume([[1,1,1,3],[2,2,2,3]]) - self.assertEqual(hv.compute(r = (3,3,3,3), algorithm=hv_algorithm.hv4d()), 0) - - def test4d_duplicate(self): - hv = hypervolume([[1,1,1,1],[1,1,1,1]]) - self.assertEqual(hv.compute(r = (2,2,2,2), algorithm=hv_algorithm.hv4d()), 1) - - # Duplicate and dominated - hv = hypervolume([[1,1,1,1],[1,1,1,1],[0,0,0,0]]) - self.assertEqual(hv.compute(r = (2,2,2,2), algorithm=hv_algorithm.hv4d()), 16) - - def test_tuple_ctor(self): - # test that hypervolume can be computed using a tuple as well - hv = hypervolume(((1,1,1),(2,2,2,))) - self.assertEqual(hv.compute(r = (3,3,3)), 8) - - def test_casting_float(self): - # casting to float - self.assertEqual(self.hv2d.compute(["4.0","4"]), 6) - - def test_refpoint_not_dom(self): - # refpoint must be at least weakly dominated by every point (assuming minimization problem) - hv = hypervolume([[1,3],[2,2], [3,1]]) - self.assertRaises(ValueError, hv.compute, [3,1]) # equal to some other point - self.assertRaises(ValueError, hv.compute, [1.5,1.5]) # refpoint dominating some points - self.assertRaises(ValueError, hv.compute, [0,0]) # refpoint dominating all points - - def test_kwargs(self): - self.assertEqual(self.hv2d.compute(r=[3.5, 3.5]), 3.25) # using kwarg 'r' correctly - self.assertEqual(self.hv2d.compute(r=[3.5, 3.5], algorithm=hv_algorithm.hv2d()), 3.25) # using kwargs 'r', 'algorithm' correctly - - self.assertRaises(TypeError, self.hv2d.compute, refpoint=[4, 4]) # bad kwarg for reference point - self.assertRaises(TypeError, self.hv2d.compute, r=[4, 4], foo="Something extra") # we do not accept random kwargs - self.assertRaises(TypeError, self.hv2d.compute, [4, 4], foo="Something extra") # we do not accept random kwargs (as above but with ref 
point as arg) - self.assertRaises(TypeError, self.hv2d.compute, [4, 4], hv_algorithm.hv2d(), foo="Something extra") # we do not accept random kwargs - - def test_kwargs_hv_algo(self): - self.assertEqual(self.hv2d.compute(r=[4,4], algorithm=hv_algorithm.hv2d()), 6) # using kwargs correctly - self.assertEqual(self.hv2d.compute(algorithm=hv_algorithm.hv2d(), r=[4,4]), 6) # using kwargs in reversed order - self.assertEqual(self.hv2d.compute([4,4], algorithm=hv_algorithm.hv2d()), 6) # arg + kwarg - self.assertEqual(self.hv2d.compute([4,4], hv_algorithm.hv2d()), 6) # arg + arg - self.assertRaises(TypeError, self.hv2d.compute, algorithm=hv_algorithm.hv2d()) # should use nadir point - - def test_bad_algo(self): - self.assertRaises(ValueError, self.hv2d.compute, [4, 4], hv_algorithm.hv3d()) # 3d method to 2d problem + def setUp(self): + self.hv2d = hypervolume([[3, 1], [2, 2], [1, 3]]) + + def test_correct_out(self): + # simple 3D test + hv = hypervolume([[1, 1, 1], [2, 2, 2, ]]) + self.assertEqual(hv.compute(r=[3, 3, 3]), 8) + + # simple 2D test + hv = hypervolume([[1, 2], [2, 1]]) + self.assertEqual(hv.compute(r=[3, 3]), 3) + + # point on the border of refpoint (2D) + hv = hypervolume([[1, 2], [2, 1]]) + self.assertEqual(hv.compute([2, 2]), 0) + + # points on the border of refpoint (3D) + hv = hypervolume([[1, 2, 1], [2, 1, 1]]) + self.assertEqual(hv.compute([2, 2, 2]), 0) + + def test4d_dominated(self): + hv = hypervolume([[1, 1, 1, 1], [2, 2, 2, 2]]) + self.assertEqual( + hv.compute(r=(3, 3, 3, 3), algorithm=hv_algorithm.hv4d()), 16) + self.assertEqual( + hv.compute(r=(3, 3, 3, 3), algorithm=hv_algorithm.fpl()), 16) + + def test4d_edge(self): + hv = hypervolume([[1, 1, 1, 3], [2, 2, 2, 3]]) + self.assertEqual( + hv.compute(r=(3, 3, 3, 3), algorithm=hv_algorithm.hv4d()), 0) + self.assertEqual( + hv.compute(r=(3, 3, 3, 3), algorithm=hv_algorithm.fpl()), 0) + + def test4d_duplicate(self): + hv = hypervolume([[1, 1, 1, 1], [1, 1, 1, 1]]) + self.assertEqual( + hv.compute(r=(2, 2, 2, 2), algorithm=hv_algorithm.hv4d()), 1) + self.assertEqual( + hv.compute(r=(2, 2, 2, 2), algorithm=hv_algorithm.fpl()), 1) + + # Duplicate and dominated + hv = hypervolume([[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]]) + self.assertEqual( + hv.compute(r=(2, 2, 2, 2), algorithm=hv_algorithm.hv4d()), 16) + self.assertEqual( + hv.compute(r=(2, 2, 2, 2), algorithm=hv_algorithm.fpl()), 16) + + def test_tuple_ctor(self): + # test that hypervolume can be computed using a tuple as well + hv = hypervolume(((1, 1, 1), (2, 2, 2,))) + self.assertEqual(hv.compute(r=(3, 3, 3)), 8) + + def test_casting_float(self): + # casting to float + self.assertEqual(self.hv2d.compute(["4.0", "4"]), 6) + + def test_refpoint_not_dom(self): + # refpoint must be at least weakly dominated by every point (assuming + # minimization problem) + hv = hypervolume([[1, 3], [2, 2], [3, 1]]) + # equal to some other point + self.assertRaises(ValueError, hv.compute, [3, 1]) + # refpoint dominating some points + self.assertRaises(ValueError, hv.compute, [1.5, 1.5]) + # refpoint dominating all points + self.assertRaises(ValueError, hv.compute, [0, 0]) + + def test_kwargs(self): + # using kwarg 'r' correctly + self.assertEqual(self.hv2d.compute(r=[3.5, 3.5]), 3.25) + # using kwargs 'r', 'algorithm' correctly + self.assertEqual( + self.hv2d.compute(r=[3.5, 3.5], algorithm=hv_algorithm.hv2d()), + 3.25) + + # bad kwarg for reference point + self.assertRaises(TypeError, self.hv2d.compute, refpoint=[4, 4]) + # we do not accept random kwargs + self.assertRaises( + TypeError, 
self.hv2d.compute, r=[4, 4], foo="Something extra") + # we do not accept random kwargs (as above but with ref point as arg) + self.assertRaises( + TypeError, self.hv2d.compute, [4, 4], foo="Something extra") + self.assertRaises( + TypeError, self.hv2d.compute, [4, 4], hv_algorithm.hv2d(), + foo="Something extra") # we do not accept random kwargs + + def test_kwargs_hv_algo(self): + # using kwargs correctly + self.assertEqual( + self.hv2d.compute(r=[4, 4], algorithm=hv_algorithm.hv2d()), 6) + # using kwargs in reversed order + self.assertEqual( + self.hv2d.compute(algorithm=hv_algorithm.hv2d(), r=[4, 4]), 6) + # arg + kwarg + self.assertEqual( + self.hv2d.compute([4, 4], algorithm=hv_algorithm.hv2d()), 6) + self.assertEqual( + self.hv2d.compute([4, 4], hv_algorithm.hv2d()), 6) # arg + arg + # should use nadir point + self.assertRaises( + TypeError, self.hv2d.compute, algorithm=hv_algorithm.hv2d()) + + def test_bad_algo(self): + # 3d method to 2d problem + self.assertRaises( + ValueError, self.hv2d.compute, [4, 4], hv_algorithm.hv3d()) + class HVContributionsTest(unittest.TestCase): - def assertContribs(self, S, R, ans): - """ - This method is an assertion that given hypervolume problem constructed from S, with a reference point R - Returns a valid answer to the "contributions" feature both for the contributions method and the explicit call - for the exclusive hypervolume as well. - """ - hv = hypervolume(S) - self.assertEqual(hv.contributions(R), ans) - self.assertEqual(tuple(hv.exclusive(i, R) for i in xrange(len(S))), ans) - - def test2d(self): - """ - This test contains a front with 3 non dominated points, - and many dominated points. Most of the dominated points - lie on edges of the front, which makes their exclusive contribution - equal to 0. - """ - S = ((1, 6.5), (1, 6), (1, 5), (2, 5), (3, 5), (3, 3), (4, 6.5), - (4.5, 4), (5, 3), (5, 1.5), (7, 1.5), (7, 3.5), ) - R = (7, 6.5, ) - ans = (0.0, 0.0, 1.0, 0.0, 0.0, 3.5, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, ) - - self.assertContribs(S, R, ans) - - # Adding few extra points that share an edge with a reference point - extra = ((7,0.5), (7, 1.0), (7, 4.5), (0.0, 6.5), (5.5, 6.5), ) - S += extra - ans += (0, ) * len(extra) - - self.assertContribs(S, R, ans) - - # Adding few duplicated points on the inside and on the edges - extra = ((7,0.5), (5.5,6.5), (5,5), (5,5), (5,5), ) - S += extra - ans += (0, )*len(extra) - - self.assertContribs(S, R, ans) - - def test2d_gradual(self): - """ - Gradually adding duplicate points to the set, making sure the contribution change accordingly. - """ - S = ((1,1),) - R = (2,2) - self.assertContribs(S, R, (1,)) - - S += ((1,1),) - self.assertContribs(S, R, (0,0)) - - S += ((1,1),) - self.assertContribs(S, R, (0,0,0)) - - S += ((0.5, 0.5),) - self.assertContribs(S, R, (0, 0, 0, 1.25)) - - S += ((0.5, 0.5),) - self.assertContribs(S, R, (0,)*5) - - def test3d(self): - """ - This test contains a tricky front in 3D with some weakly dominated points on the "edges" of the bounding box. 
- """ - - # Non-tricky base problem - S = ((-6, -1, -6), (-1, -3, -5), (-3, -4, -4), (-4, -2, -3), (-5, -5, -2), (-2, -6, -1),) - R = (0, 0, 0) - ans = (18, 2, 12, 1, 18, 2) - - self.assertContribs(S, R, ans) - - # Add some points that contribute nothing and do not alter other contributions - extra = ((-3,-1,-3),(-1,-1,-5),(-1,-2,-4), (-1,-3,-4), (-7,-7,0), (0,-5,-5), (-7,0,-7)) - - S += extra - ans += (0,)*len(extra) - self.assertContribs(S, R, ans) - - def test3d_gradual(self): - """ - Gradually adding points, some of which are dominated or duplicates. - Tests whether contributions and repeated exclusive method produce the same results. - """ - S = ((3,3,3),) - R = (5,5,5) - self.assertContribs(S, R, (8,)) - - # Decrease the contribution of first point. Second point is dominated. - S += ((4,4,4),) - self.assertContribs(S, R, (7,0,)) - - # Add duplicate point - S += ((3,3,3),) - self.assertContribs(S, R, (0,0,0)) - - S += ((3,3,2),) - self.assertContribs(S, R, (0, 0, 0, 4)) - - S += ((3,3,1),) - self.assertContribs(S, R, (0, 0, 0, 0, 4)) - def test3d_extreme(self): - """ - Combine extreme points together. - Mixing small and large contributions in a single front - """ - - # Reset the set S. - # 3 duplicate points - R = (0,0,0) - S = ((-1,-1,-1),) * 3 - self.assertContribs(S, R, (0,)*3) - - # Adding a point far away - S += ((-1000,)*3,) - self.assertContribs(S, R, (0,0,0,999999999)) - - # Adding an even further point - S += ((-10000,)*3,) - self.assertContribs(S, R, (0,0,0,0,999000000000)) - - # Tiny box on top of a large one - S = ((-1000.001, -0.001, -0.001), (-1000, -1000, -1000)) - R = (0,0,0) - hv = hypervolume(S) - ans = (0.000000001,999999999.999) - c = list(hv.contributions(R)) - # Round contribution to 9th decimal place as the double type is loosing the exact accuracy - c[0] = round(c[0], 9) - self.assertEqual(tuple(c), ans) - - def test5d(self): - """ - Gradually adding points. - Tests whether contributions and repeated exclusive methods produce the same results. - """ - S = ((1,1,1,1,1), ) - R = (5,5,5,5,5) - self.assertContribs(S, R, (1024,)) - - S += ((4,4,4,4,4),) - self.assertContribs(S, R, (1023, 0, )) - - S += ((3,3,3,3,3),) - self.assertContribs(S, R, (992,0,0,)) - - S += ((1,1,1,1,1),) - self.assertContribs(S, R, (0,)*4) + + def assertContribs(self, S, R, ans): + """ + This method is an assertion that given hypervolume problem constructed from S, with a reference point R + Returns a valid answer to the "contributions" feature both for the contributions method and the explicit call + for the exclusive hypervolume as well. + """ + hv = hypervolume(S) + self.assertEqual(hv.contributions(R), ans) + self.assertEqual(tuple(hv.exclusive(i, R) for i in range(len(S))), ans) + + def test2d(self): + """ + This test contains a front with 3 non dominated points, + and many dominated points. Most of the dominated points + lie on edges of the front, which makes their exclusive contribution + equal to 0. 
+ """ + S = ((1, 6.5), (1, 6), (1, 5), (2, 5), (3, 5), (3, 3), (4, 6.5), + (4.5, 4), (5, 3), (5, 1.5), (7, 1.5), (7, 3.5), ) + R = (7, 6.5, ) + ans = (0.0, 0.0, 1.0, 0.0, 0.0, 3.5, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, ) + + self.assertContribs(S, R, ans) + + # Adding few extra points that share an edge with a reference point + extra = ((7, 0.5), (7, 1.0), (7, 4.5), (0.0, 6.5), (5.5, 6.5), ) + S += extra + ans += (0, ) * len(extra) + + self.assertContribs(S, R, ans) + + # Adding few duplicated points on the inside and on the edges + extra = ((7, 0.5), (5.5, 6.5), (5, 5), (5, 5), (5, 5), ) + S += extra + ans += (0, ) * len(extra) + + self.assertContribs(S, R, ans) + + def test2d_gradual(self): + """ + Gradually adding duplicate points to the set, making sure the contribution change accordingly. + """ + S = ((1, 1),) + R = (2, 2) + self.assertContribs(S, R, (1,)) + + S += ((1, 1),) + self.assertContribs(S, R, (0, 0)) + + S += ((1, 1),) + self.assertContribs(S, R, (0, 0, 0)) + + S += ((0.5, 0.5),) + self.assertContribs(S, R, (0, 0, 0, 1.25)) + + S += ((0.5, 0.5),) + self.assertContribs(S, R, (0,) * 5) + + def test3d(self): + """ + This test contains a tricky front in 3D with some weakly dominated points on the "edges" of the bounding box. + """ + + # Non-tricky base problem + S = ((-6, -1, -6), (-1, -3, -5), (-3, -4, -4), + (-4, -2, -3), (-5, -5, -2), (-2, -6, -1),) + R = (0, 0, 0) + ans = (18, 2, 12, 1, 18, 2) + + self.assertContribs(S, R, ans) + + # Add some points that contribute nothing and do not alter other + # contributions + extra = ((-3, -1, -3), (-1, -1, -5), (-1, -2, -4), + (-1, -3, -4), (-7, -7, 0), (0, -5, -5), (-7, 0, -7)) + + S += extra + ans += (0,) * len(extra) + self.assertContribs(S, R, ans) + + def test3d_gradual(self): + """ + Gradually adding points, some of which are dominated or duplicates. + Tests whether contributions and repeated exclusive method produce the same results. + """ + S = ((3, 3, 3),) + R = (5, 5, 5) + self.assertContribs(S, R, (8,)) + + # Decrease the contribution of first point. Second point is dominated. + S += ((4, 4, 4),) + self.assertContribs(S, R, (7, 0,)) + + # Add duplicate point + S += ((3, 3, 3),) + self.assertContribs(S, R, (0, 0, 0)) + + S += ((3, 3, 2),) + self.assertContribs(S, R, (0, 0, 0, 4)) + + S += ((3, 3, 1),) + self.assertContribs(S, R, (0, 0, 0, 0, 4)) + + def test3d_extreme(self): + """ + Combine extreme points together. + Mixing small and large contributions in a single front + """ + + # Reset the set S. + # 3 duplicate points + R = (0, 0, 0) + S = ((-1, -1, -1),) * 3 + self.assertContribs(S, R, (0,) * 3) + + # Adding a point far away + S += ((-1000,) * 3,) + self.assertContribs(S, R, (0, 0, 0, 999999999)) + + # Adding an even further point + S += ((-10000,) * 3,) + self.assertContribs(S, R, (0, 0, 0, 0, 999000000000)) + + # Tiny box on top of a large one + S = ((-1000.001, -0.001, -0.001), (-1000, -1000, -1000)) + R = (0, 0, 0) + hv = hypervolume(S) + ans = (0.000000001, 999999999.999) + c = list(hv.contributions(R)) + # Round contribution to 9th decimal place as the double type is loosing + # the exact accuracy + c[0] = round(c[0], 9) + self.assertEqual(tuple(c), ans) + + def test4d(self): + """ + Gradually adding points. + Tests whether contributions and repeated exclusive methods produce the same results. 
+ """ + S = ((1, 1, 1, 1), ) + R = (5, 5, 5, 5) + self.assertContribs(S, R, (256,)) + + S += ((4, 4, 4, 4),) + self.assertContribs(S, R, (255, 0, )) + + S += ((3, 3, 3, 3),) + self.assertContribs(S, R, (240, 0, 0)) + + S += ((1, 1, 1, 1),) + self.assertContribs(S, R, (0, ) * 4) + + def test5d(self): + """ + Gradually adding points. + Tests whether contributions and repeated exclusive methods produce the same results. + """ + S = ((1, 1, 1, 1, 1), ) + R = (5, 5, 5, 5, 5) + self.assertContribs(S, R, (1024,)) + + S += ((4, 4, 4, 4, 4),) + self.assertContribs(S, R, (1023, 0, )) + + S += ((3, 3, 3, 3, 3),) + self.assertContribs(S, R, (992, 0, 0,)) + + S += ((1, 1, 1, 1, 1),) + self.assertContribs(S, R, (0,) * 4) + class HVLeastContribTest(unittest.TestCase): - def setUp(self): - self.r = [4,4] - self.hv2d_eq_0 = hypervolume([[3,1],[2,2],[1,3]]) # LC in [0,1,2] - self.hv2d_eq_1 = hypervolume([[2.5,1],[2,2],[1,3]]) # LC = 1 - self.hv2d_0 = hypervolume([[3.5,1],[2,2],[1,3]]) - self.hv2d_1 = hypervolume([[3,1],[2.5,2.5],[1,3]]) - self.hv2d_2 = hypervolume([[3,1],[2,2],[1,3.5]]) - - def test_correct_out(self): - self.assertTrue(self.hv2d_eq_0.least_contributor(r=self.r) in [0,1,2]) - - self.assertEqual(self.hv2d_eq_1.least_contributor(r=self.r), 1) - self.assertEqual(self.hv2d_0.least_contributor(r=self.r), 0) - self.assertEqual(self.hv2d_1.least_contributor(r=self.r), 1) - self.assertEqual(self.hv2d_2.least_contributor(r=self.r), 2) - - def test_kwargs(self): - self.assertEqual(self.hv2d_1.least_contributor(r=self.r), 1) # using kwarg 'r' correctly - self.assertEqual(self.hv2d_1.least_contributor(r=self.r, algorithm=hv_algorithm.hv2d()), 1) # using kwarg 'r' and 'algorithm' correctly - - self.assertRaises(TypeError, self.hv2d_0.least_contributor, refpoint=[4, 4]) # bad kwarg for reference point - self.assertRaises(TypeError, self.hv2d_0.least_contributor, r=[4, 4], foo="Something extra") # we do not accept random kwargs - self.assertRaises(TypeError, self.hv2d_0.least_contributor, [4, 4], foo="Something extra") # we do not accept random kwargs (as above but with ref point as arg) - self.assertRaises(TypeError, self.hv2d_0.least_contributor, [4, 4], hv_algorithm.hv2d(), foo="Something extra") # we do not accept random kwargs - - def test_bad_algo(self): - self.assertRaises(ValueError, self.hv2d_0.least_contributor, [4, 4], hv_algorithm.hv3d()) # 3d method to 2d problem + def setUp(self): + self.r = [4, 4] + self.hv2d_eq_0 = hypervolume([[3, 1], [2, 2], [1, 3]]) # LC in [0,1,2] + self.hv2d_eq_1 = hypervolume([[2.5, 1], [2, 2], [1, 3]]) # LC = 1 + self.hv2d_0 = hypervolume([[3.5, 1], [2, 2], [1, 3]]) + self.hv2d_1 = hypervolume([[3, 1], [2.5, 2.5], [1, 3]]) + self.hv2d_2 = hypervolume([[3, 1], [2, 2], [1, 3.5]]) + + def test_correct_out(self): + self.assertTrue( + self.hv2d_eq_0.least_contributor(r=self.r) in [0, 1, 2]) + + self.assertEqual(self.hv2d_eq_1.least_contributor(r=self.r), 1) + self.assertEqual(self.hv2d_0.least_contributor(r=self.r), 0) + self.assertEqual(self.hv2d_1.least_contributor(r=self.r), 1) + self.assertEqual(self.hv2d_2.least_contributor(r=self.r), 2) + + def test_kwargs(self): + # using kwarg 'r' correctly + self.assertEqual(self.hv2d_1.least_contributor(r=self.r), 1) + # using kwarg 'r' and 'algorithm' correctly + self.assertEqual( + self.hv2d_1.least_contributor( + r=self.r, + algorithm=hv_algorithm.hv2d()), + 1) + + # bad kwarg for reference point + self.assertRaises( + TypeError, self.hv2d_0.least_contributor, refpoint=[4, 4]) + self.assertRaises( + TypeError, 
self.hv2d_0.least_contributor, r=[4, 4],
+            foo="Something extra")  # we do not accept random kwargs
+        # we do not accept random kwargs (as above but with ref point as arg)
+        self.assertRaises(
+            TypeError, self.hv2d_0.least_contributor, [4, 4],
+            foo="Something extra")
+        self.assertRaises(
+            TypeError, self.hv2d_0.least_contributor, [4, 4],
+            hv_algorithm.hv2d(), foo="Something extra")  # we do not accept random kwargs
+
+    def test_bad_algo(self):
+        self.assertRaises(
+            ValueError, self.hv2d_0.least_contributor, [4, 4],
+            hv_algorithm.hv3d())  # 3d method to 2d problem
+
+
 class HVExclusiveTest(unittest.TestCase):
-    def setUp(self):
-        self.r = [4,4]
-        self.hv2d = hypervolume([[3,1],[2,2],[1,3]]) # all are equal (take first -> idx = 0)
-        self.hv2d_2 = hypervolume([[3.1,1],[2,2],[1,3]]) # all are equal (take first -> idx = 0)
-
-    def test_correct_out(self):
-        self.assertEqual(self.hv2d.exclusive(p_idx=0, r=self.r), 1)
-        self.assertEqual(self.hv2d.exclusive(p_idx=1, r=self.r), 1)
-        self.assertEqual(self.hv2d.exclusive(p_idx=2, r=self.r), 1)
-        self.assertTrue(abs(self.hv2d_2.exclusive(p_idx=0, r=self.r) - 0.9) < 0.00000001)
-
-    def test_kwargs(self):
-        self.assertRaises(TypeError,self.hv2d.exclusive, 0) # no refpoint
-        self.assertRaises(TypeError,self.hv2d.exclusive, p_idx=0) # no refpoint
-        self.assertRaises(TypeError,self.hv2d.exclusive, p_idx=0, hv_algorithm=hv_algorithm.wfg()) # no refpoint
-        self.assertEqual(self.hv2d.exclusive(0, self.r), 1) # using arg
-        self.assertEqual(self.hv2d.exclusive(0, r=self.r), 1) # using kwarg 'r' correctly
-        self.assertEqual(self.hv2d.exclusive(p_idx=0, r=self.r), 1) # using kwarg 'r' correctly
-
-        self.assertRaises(TypeError, self.hv2d.exclusive, p_idx=0, algorithm=hv_algorithm.hv2d()) # no refpoint
-        self.assertEqual(self.hv2d.exclusive(0, self.r, hv_algorithm.hv2d()), 1) # all args
-        self.assertEqual(self.hv2d.exclusive(0, self.r, algorithm=hv_algorithm.hv2d()), 1) # last kwarg
-        self.assertEqual(self.hv2d.exclusive(p_idx=0, r=self.r, algorithm=hv_algorithm.hv2d()), 1) # all kwargs
-        self.assertEqual(self.hv2d.exclusive(algorithm=hv_algorithm.hv2d(), r=self.r, p_idx=0), 1) # all kwargs in reverse
-
-        self.assertRaises(TypeError, self.hv2d.exclusive, 0, refpoint=[4, 4]) # bad kwarg for reference point
-        self.assertRaises(TypeError, self.hv2d.exclusive, 0, r=[4, 4], foo="Something extra") # we do not accept random kwargs
-        self.assertRaises(TypeError, self.hv2d.exclusive, 0, [4, 4], foo="Something extra") # we do not accept random kwargs (as above but with ref point as arg)
-        self.assertRaises(TypeError, self.hv2d.exclusive, 0, [4, 4], hv_algorithm.hv2d(), foo="Something extra") # we do not accept random kwargs
-        self.assertRaises(TypeError, self.hv2d.exclusive, p_idx=0, r=self.r, algorithm=hv_algorithm.hv2d(), foo="Something extra")
-        self.assertRaises(TypeError, self.hv2d.exclusive, r=self.r, algorithm=hv_algorithm.hv2d()) # no p_idx
-
-    def test_p_idx(self):
-        self.assertRaises(TypeError, self.hv2d.exclusive, -1, self.r) # negative idx
-        self.assertRaises(ValueError, self.hv2d.exclusive, 100, self.r) # large
-        self.assertRaises(TypeError, self.hv2d.exclusive, "not an int", self.r) # large
-        self.assertRaises(TypeError, self.hv2d.exclusive) # p_idx not provided
-
-    def test_bad_algo(self):
-        self.assertRaises(ValueError, self.hv2d.exclusive, 0, [4, 4], hv_algorithm.hv3d()) # 3d method to 2d problem
+
+    def setUp(self):
+        self.r = [4, 4]
+        # all are equal (take first -> idx = 0)
+        self.hv2d = hypervolume([[3, 1], [2, 2], [1, 3]])
+        # first point contributes slightly less than the others
+        self.hv2d_2 = hypervolume([[3.1, 1], [2, 2], [1, 3]])
+
+    def test_correct_out(self):
+        self.assertEqual(self.hv2d.exclusive(p_idx=0, r=self.r), 1)
+        self.assertEqual(self.hv2d.exclusive(p_idx=1, r=self.r), 1)
+        self.assertEqual(self.hv2d.exclusive(p_idx=2, r=self.r), 1)
+        self.assertTrue(
+            abs(self.hv2d_2.exclusive(p_idx=0, r=self.r) - 0.9) < 0.00000001)
+
+    def test_kwargs(self):
+        self.assertRaises(TypeError, self.hv2d.exclusive, 0)  # no refpoint
+        # no refpoint
+        self.assertRaises(TypeError, self.hv2d.exclusive, p_idx=0)
+        self.assertRaises(
+            TypeError,
+            self.hv2d.exclusive,
+            p_idx=0,
+            hv_algorithm=hv_algorithm.wfg())  # no refpoint
+        self.assertEqual(self.hv2d.exclusive(0, self.r), 1)  # using arg
+        # using kwarg 'r' correctly
+        self.assertEqual(self.hv2d.exclusive(0, r=self.r), 1)
+        # using kwarg 'r' correctly
+        self.assertEqual(self.hv2d.exclusive(p_idx=0, r=self.r), 1)
+
+        self.assertRaises(
+            TypeError,
+            self.hv2d.exclusive,
+            p_idx=0,
+            algorithm=hv_algorithm.hv2d())  # no refpoint
+        self.assertEqual(
+            self.hv2d.exclusive(0, self.r, hv_algorithm.hv2d()), 1)  # all args
+        self.assertEqual(
+            self.hv2d.exclusive(0, self.r, algorithm=hv_algorithm.hv2d()),
+            1)  # last kwarg
+        self.assertEqual(self.hv2d.exclusive(
+            p_idx=0, r=self.r, algorithm=hv_algorithm.hv2d()), 1)  # all kwargs
+        self.assertEqual(
+            self.hv2d.exclusive(algorithm=hv_algorithm.hv2d(), r=self.r,
+                                p_idx=0),
+            1)  # all kwargs in reverse
+
+        # bad kwarg for reference point
+        self.assertRaises(TypeError, self.hv2d.exclusive, 0, refpoint=[4, 4])
+        # we do not accept random kwargs
+        self.assertRaises(
+            TypeError, self.hv2d.exclusive, 0, r=[4, 4], foo="Something extra")
+        # we do not accept random kwargs (as above but with ref point as arg)
+        self.assertRaises(
+            TypeError, self.hv2d.exclusive, 0, [4, 4], foo="Something extra")
+        self.assertRaises(
+            TypeError, self.hv2d.exclusive, 0, [4, 4], hv_algorithm.hv2d(),
+            foo="Something extra")  # we do not accept random kwargs
+        self.assertRaises(
+            TypeError,
+            self.hv2d.exclusive,
+            p_idx=0,
+            r=self.r,
+            algorithm=hv_algorithm.hv2d(),
+            foo="Something extra")
+        self.assertRaises(TypeError, self.hv2d.exclusive,
+                          r=self.r, algorithm=hv_algorithm.hv2d())  # no p_idx
+
+    def test_p_idx(self):
+        # negative idx
+        self.assertRaises(TypeError, self.hv2d.exclusive, -1, self.r)
+        self.assertRaises(
+            ValueError, self.hv2d.exclusive, 100, self.r)  # too large
+        self.assertRaises(
+            TypeError, self.hv2d.exclusive, "not an int", self.r)  # not an int
+        self.assertRaises(TypeError, self.hv2d.exclusive)  # p_idx not provided
+
+    def test_bad_algo(self):
+        # 3d method to 2d problem
+        self.assertRaises(
+            ValueError, self.hv2d.exclusive, 0, [4, 4], hv_algorithm.hv3d())
+
+
 class HVNadirPointTest(unittest.TestCase):
-    def setUp(self):
-        self.hv2d = hypervolume([[3,1],[2,2],[1,3]])
+
+    def setUp(self):
+        self.hv2d = hypervolume([[3, 1], [2, 2], [1, 3]])
+
+    def test_nadir_point(self):
+        # default nadir point
+        self.assertEqual(tuple(self.hv2d.get_nadir_point()), (3, 3))
+        # custom nadir point
+        self.assertEqual(tuple(self.hv2d.get_nadir_point(5.0)), (8, 8))
+        # nadir point with eps=0.0
+        self.assertEqual(tuple(self.hv2d.get_nadir_point(0.0)), (3, 3))
+        # nadir point with eps=-0.0 is ok
+        self.assertEqual(tuple(self.hv2d.get_nadir_point(-0.0)), (3, 3))
+        # custom nadir point with 'eps' kwarg
+        self.assertEqual(tuple(self.hv2d.get_nadir_point(eps=5.0)), (8, 8))
+        # nadir point with negative eps
+        self.assertRaises(ValueError, self.hv2d.get_nadir_point, -0.0000001)
+        
self.assertRaises( + TypeError, self.hv2d.get_nadir_point, eps="foo") # bad kwarg + self.assertRaises( + TypeError, self.hv2d.get_nadir_point, "foo") # bad arg + # bad kwarg name + self.assertRaises(TypeError, self.hv2d.get_nadir_point, epsilon=1.0) - def test_nadir_point(self): - self.assertEqual(tuple(self.hv2d.get_nadir_point()), (3,3)) # default nadir point - self.assertEqual(tuple(self.hv2d.get_nadir_point(5.0)), (8,8)) # custom nadir point - self.assertEqual(tuple(self.hv2d.get_nadir_point(0.0)), (3,3)) # nadir point with eps=0.0 - self.assertEqual(tuple(self.hv2d.get_nadir_point(-0.0)), (3,3)) # nadir point with eps=-0.0 is ok - self.assertEqual(tuple(self.hv2d.get_nadir_point(eps=5.0)), (8,8)) # custom nadir point with 'eps' kwarg - self.assertRaises(ValueError, self.hv2d.get_nadir_point, -0.0000001) # nadir point with negative eps - self.assertRaises(TypeError, self.hv2d.get_nadir_point, eps="foo") # bad kwarg - self.assertRaises(TypeError, self.hv2d.get_nadir_point, "foo") # bad arg - self.assertRaises(TypeError, self.hv2d.get_nadir_point, epsilon=1.0) # bad kwarg name class HVAlgorithms(unittest.TestCase): - def setUp(self): - self.ps4d_0 = [[1.,]*4,] - self.ps3d_0 = [[1.,]*3,] - self.r4d_0 = [2,]*4 - self.r3d_0 = [2,]*3 - - def test_wfg(self): - self.assertRaises(ValueError, hv_algorithm.wfg, stop_dimension = 0) # stop_dimension = 0 - self.assertRaises(ValueError, hv_algorithm.wfg, stop_dimension = 1) # stop_dimension = 1 - def test_hv4d(self): - hv3d = hypervolume(self.ps3d_0) - hv4d = hypervolume(self.ps4d_0) - - self.assertEqual(1.0, hv4d.compute(self.r4d_0, algorithm=hv_algorithm.hv4d())) - self.assertRaises(ValueError, hv3d.compute, r=self.r3d_0, algorithm=hv_algorithm.hv4d()) - self.assertRaises(ValueError, hv3d.compute, r=self.r4d_0, algorithm=hv_algorithm.hv4d()) - self.assertRaises(ValueError, hv4d.compute, r=self.r3d_0, algorithm=hv_algorithm.hv4d()) + + def setUp(self): + self.ps4d_0 = [[1., ] * 4, ] + self.ps3d_0 = [[1., ] * 3, ] + self.r4d_0 = [2, ] * 4 + self.r3d_0 = [2, ] * 3 + + def test_wfg(self): + # stop_dimension = 0 + self.assertRaises(ValueError, hv_algorithm.wfg, stop_dimension=0) + # stop_dimension = 1 + self.assertRaises(ValueError, hv_algorithm.wfg, stop_dimension=1) + + def test_hv4d(self): + hv3d = hypervolume(self.ps3d_0) + hv4d = hypervolume(self.ps4d_0) + + self.assertEqual( + 1.0, hv4d.compute(self.r4d_0, algorithm=hv_algorithm.hv4d())) + self.assertRaises( + ValueError, + hv3d.compute, + r=self.r3d_0, + algorithm=hv_algorithm.hv4d()) + self.assertRaises( + ValueError, + hv3d.compute, + r=self.r4d_0, + algorithm=hv_algorithm.hv4d()) + self.assertRaises( + ValueError, + hv4d.compute, + r=self.r3d_0, + algorithm=hv_algorithm.hv4d()) + def get_hv_suite(): - suite = unittest.TestSuite() - suite.addTests(unittest.makeSuite(HVCtorTest)) - suite.addTests(unittest.makeSuite(HVComputeTest)) - suite.addTests(unittest.makeSuite(HVLeastContribTest)) - suite.addTests(unittest.makeSuite(HVExclusiveTest)) - suite.addTests(unittest.makeSuite(HVNadirPointTest)) - suite.addTests(unittest.makeSuite(HVAlgorithms)) - suite.addTests(unittest.makeSuite(HVFlagsTest)) - suite.addTests(unittest.makeSuite(HVContributionsTest)) - return suite + suite = unittest.TestSuite() + suite.addTests(unittest.makeSuite(HVCtorTest)) + suite.addTests(unittest.makeSuite(HVComputeTest)) + suite.addTests(unittest.makeSuite(HVLeastContribTest)) + suite.addTests(unittest.makeSuite(HVExclusiveTest)) + suite.addTests(unittest.makeSuite(HVNadirPointTest)) + 
suite.addTests(unittest.makeSuite(HVAlgorithms)) + suite.addTests(unittest.makeSuite(HVFlagsTest)) + suite.addTests(unittest.makeSuite(HVContributionsTest)) + return suite diff --git a/PyGMO/topology/__init__.py b/PyGMO/topology/__init__.py index bd8f8077..2288f757 100644 --- a/PyGMO/topology/__init__.py +++ b/PyGMO/topology/__init__.py @@ -1,304 +1,335 @@ # -*- coding: utf-8 -*- -from _topology import * +from PyGMO.topology._topology import * # Some methods added to interface to networkx def _to_networkx(self): - """ - Export topology as a networkx DiGraph. - """ - try: - import networkx as nx - except ImportError: - raise ImportError('Could not import the networkx module.') - retval = nx.DiGraph() - for i in range(self.number_of_vertices): - if self.get_num_adjacent_vertices(i): - retval.add_edges_from([(i,n) for n in self.get_adjacent_vertices(i)]) - else: - retval.add_node(i) - return retval + """ + Export topology as a networkx DiGraph. + """ + try: + import networkx as nx + except ImportError: + raise ImportError('Could not import the networkx module.') + retval = nx.DiGraph() + for i in range(self.number_of_vertices): + if self.get_num_adjacent_vertices(i): + retval.add_edges_from([(i, n) + for n in self.get_adjacent_vertices(i)]) + else: + retval.add_node(i) + return retval _topology._base.to_networkx = _to_networkx -def _draw(self, layout = 'spring', n_color = 'blue', n_size = 15, n_alpha = 0.5, e_alpha = 0.1, e_arrows=False, scale_by_degree = False): - """ - Draw topology using the draw() command from networkx. - - USAGE: topology.draw(layout = "spring", n_size = 15, scale_by_degree = False, n_color = 'blue', n_alpha = 0.5, e_alpha = 0.1, e_arrows=False) - - * layout: Network layout. Can be 'spring' or 'circular'. - * n_size: The size of nodes. Becomes scaling factor when scale_by_degree=True. - * scale_by_degree: When True, nodes will be sized proportional to their degree. - * n_color: Node color. - * n_alpha: Transparency of nodes. Takes value between 0 and 1. - * e_elpha: Transparency of edges. Takes value between 0 and 1. - * e_arrows: Plots arrows on the edges for directed graphs - * scale_by_degree: When True, nodes will be sized proportional to their degree. - """ - try: - import networkx as nx - except ImportError: - raise ImportError('Could not import the networkx module.') - try: - import matplotlib.pyplot as pl - except ImportError: - raise ImportError('Could not improt the MatPlotLib module.') - if self.number_of_vertices == 0 or self.number_of_vertices == 1: - raise ValueError('Cannot draw topology with one single vertex or less.') - - G = self.to_networkx() - node_sizes = range(nx.number_of_nodes(G)) - for i in range(nx.number_of_nodes(G)): - if scale_by_degree: - node_sizes[i] = nx.degree(G,i)*n_size - else: - node_sizes[i] = n_size - - if layout == "spring": - pos = nx.spring_layout(self.to_networkx()) - if layout == "circular": - pos = nx.circular_layout(self.to_networkx()) - - pl.figure() - nx.draw_networkx_edges(self.to_networkx(),pos,alpha=e_alpha,arrows=e_arrows) - nx.draw_networkx_nodes(self.to_networkx(),pos,node_size=node_sizes,node_color=n_color,alpha=n_alpha) - pl.axis('off') - pl.show() + +def _draw( + self, + layout='spring', + n_color='blue', + n_size=15, + n_alpha=0.5, + e_alpha=0.1, + e_arrows=False, + scale_by_degree=False): + """ + Draw topology using the draw() command from networkx. 
+ + USAGE: topology.draw(layout = "spring", n_size = 15, scale_by_degree = False, n_color = 'blue', n_alpha = 0.5, e_alpha = 0.1, e_arrows=False) + + * layout: Network layout. Can be 'spring' or 'circular'. + * n_size: The size of nodes. Becomes scaling factor when scale_by_degree=True. + * scale_by_degree: When True, nodes will be sized proportional to their degree. + * n_color: Node color. + * n_alpha: Transparency of nodes. Takes value between 0 and 1. + * e_alpha: Transparency of edges. Takes value between 0 and 1. + * e_arrows: Plots arrows on the edges for directed graphs + """ + try: + import networkx as nx + except ImportError: + raise ImportError('Could not import the networkx module.') + try: + import matplotlib.pyplot as pl + except ImportError: + raise ImportError('Could not import the MatPlotLib module.') + if self.number_of_vertices == 0 or self.number_of_vertices == 1: + raise ValueError( + 'Cannot draw a topology with fewer than two vertices.') + + G = self.to_networkx() + node_sizes = list(range(nx.number_of_nodes(G))) + for i in range(nx.number_of_nodes(G)): + if scale_by_degree: + node_sizes[i] = nx.degree(G, i) * n_size + else: + node_sizes[i] = n_size + + if layout == "spring": + pos = nx.spring_layout(self.to_networkx()) + if layout == "circular": + pos = nx.circular_layout(self.to_networkx()) + + pl.figure() + nx.draw_networkx_edges( + self.to_networkx(), pos, alpha=e_alpha, arrows=e_arrows) + nx.draw_networkx_nodes( + self.to_networkx(), + pos, + node_size=node_sizes, + node_color=n_color, + alpha=n_alpha) + pl.axis('off') + pl.show() _topology._base.draw = _draw -def _draw_degree_distribution(self, style = '.'): - """ - Plot the degree dstribution on a loglog scale - - USAGE topology.draw_degree_distribution(style = 'r.') - - style: MatPlotLib line style. - """ - try: - import matplotlib.pyplot as pl - except ImportError: - raise ImportError('Could not improt the MatPlotLib module.') - dd = self.get_degree_distribution(); - pl.figure() - pl.loglog(dd,style) - pl.xlabel('k - number of links') - pl.ylabel('p(k) - probability of k') - pl.show() + +def _draw_degree_distribution(self, style='.'): + """ + Plot the degree distribution on a loglog scale + + USAGE: topology.draw_degree_distribution(style = 'r.') + + style: MatPlotLib line style.
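A usage sketch for the two plotting helpers defined here (assumes networkx and matplotlib are installed; the ring topology is only an example):

    # Plotting sketch (topology choice is illustrative).
    from PyGMO import topology
    topo = topology.ring(nodes=32)
    topo.draw(layout='circular', n_size=20, scale_by_degree=True)
    topo.draw_degree_distribution(style='r.')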
+ """ + try: + import matplotlib.pyplot as pl + except ImportError: + raise ImportError('Could not import the MatPlotLib module.') + dd = self.get_degree_distribution() + pl.figure() + pl.loglog(dd, style) + pl.xlabel('k - number of links') + pl.ylabel('p(k) - probability of k') + pl.show() _topology._base.draw_degree_distribution = _draw_degree_distribution # Re-exposing the constructors -def _watts_strogatz_ctor(self, k = 10, p = 0.1, nodes = 0): - """ - Constructs Watts-Strogatz Topology: - - USAGE: topology.watts-strogatz(k = 10, p = 0.1, nodes=0) - - * k: number of neighbours - * p: rewiring probability [0,1] - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(k) - arg_list.append(p) - arg_list.append(nodes) - self._orig_init(*arg_list) + + +def _watts_strogatz_ctor(self, k=10, p=0.1, nodes=0): + """ + Constructs a Watts-Strogatz topology: + + USAGE: topology.watts_strogatz(k = 10, p = 0.1, nodes=0) + + * k: number of neighbours + * p: rewiring probability [0,1] + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(k) + arg_list.append(p) + arg_list.append(nodes) + self._orig_init(*arg_list) watts_strogatz._orig_init = watts_strogatz.__init__ watts_strogatz.__init__ = _watts_strogatz_ctor -def _erdos_renyi_ctor(self, p = 0.1, nodes=0): - """ - Constructs an Erdos-Renyi (random) topology: - - USAGE: topology.erdos_renyi(p = 0.1, nodes=0) - - * p: wiring probability [0,1] - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(p) - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + +def _erdos_renyi_ctor(self, p=0.1, nodes=0): + """ + Constructs an Erdos-Renyi (random) topology: + + USAGE: topology.erdos_renyi(p = 0.1, nodes=0) + + * p: wiring probability [0,1] + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(p) + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() erdos_renyi._orig_init = erdos_renyi.__init__ erdos_renyi.__init__ = _erdos_renyi_ctor -def _barabasi_albert_ctor(self, m0 = 3, m=3, nodes = 0): - """ - Constructs an Barabasi-Albert topology: - - USAGE: topology.barabasi_albert(m0 = 3, m = 3, nodes = 0) - - * m0: kernel size - * m: number of random connections to be established when a new node is added. - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(m0) - arg_list.append(m) - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + +def _barabasi_albert_ctor(self, m0=3, m=3, nodes=0): + """ + Constructs a Barabasi-Albert topology: + + USAGE: topology.barabasi_albert(m0 = 3, m = 3, nodes = 0) + + * m0: kernel size + * m: number of random connections to be established when a new node is added.
* nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(m0) + arg_list.append(m) + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() barabasi_albert._orig_init = barabasi_albert.__init__ barabasi_albert.__init__ = _barabasi_albert_ctor -def _clustered_ba_ctor(self, m0 = 3, m=3, p=0.5, nodes = 0): - """ - Constructs a Clustered Barabasi-Albert topology: - - USAGE: topology.clustered_ba(mm0 = 3, m=3, p=0.5, nodes = 0) - - * m0: kernel size - * m: nnumber of random connections to be established when a new node is added - * p: probability that a connection is established between two nodes that are adjacent to a new node - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(m0) - arg_list.append(m) - arg_list.append(p) - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + +def _clustered_ba_ctor(self, m0=3, m=3, p=0.5, nodes=0): + """ + Constructs a Clustered Barabasi-Albert topology: + + USAGE: topology.clustered_ba(m0 = 3, m=3, p=0.5, nodes = 0) + + * m0: kernel size + * m: number of random connections to be established when a new node is added + * p: probability that a connection is established between two nodes that are adjacent to a new node + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(m0) + arg_list.append(m) + arg_list.append(p) + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() clustered_ba._orig_init = clustered_ba.__init__ clustered_ba.__init__ = _clustered_ba_ctor -def _ageing_clustered_ba_ctor(self, m0 = 3, m=3, p=0.5, a=1000, nodes = 0): - """ - Constructs a Clustered Barabási-Albert with Ageing vertices graph topology.: - - USAGE: topology.clustered_ba(m0 = 3, m=3, p=0.5, a=1000, nodes = 0) - - * m0: kernel size - * m: number of random connections to be established when a new node is added - * p: probability that a connection is established between two nodes that are adjacent to a new node - * a: 'age' at which a node ceases to make new connections. - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(m0) - arg_list.append(m) - arg_list.append(p) - arg_list.append(a) - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + +def _ageing_clustered_ba_ctor(self, m0=3, m=3, p=0.5, a=1000, nodes=0): + """ + Constructs a Clustered Barabási-Albert with Ageing vertices graph topology: + + USAGE: topology.ageing_clustered_ba(m0 = 3, m=3, p=0.5, a=1000, nodes = 0) + + * m0: kernel size + * m: number of random connections to be established when a new node is added + * p: probability that a connection is established between two nodes that are adjacent to a new node + * a: 'age' at which a node ceases to make new connections.
+ * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(m0) + arg_list.append(m) + arg_list.append(p) + arg_list.append(a) + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() ageing_clustered_ba._orig_init = ageing_clustered_ba.__init__ ageing_clustered_ba.__init__ = _ageing_clustered_ba_ctor -def _fully_connected_ctor(self, nodes = 0): - """ - Constructs a fully connected topology: - USAGE: topology.fully_connected(nodes = 0) +def _fully_connected_ctor(self, nodes=0): + """ + Constructs a fully connected topology: - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + USAGE: topology.fully_connected(nodes = 0) + + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() fully_connected._orig_init = fully_connected.__init__ fully_connected.__init__ = _fully_connected_ctor -def _hypercube_ctor(self, nodes = 0): - """ - Constructs a hypercube topology: - USAGE: topology.hypercube(nodes = 0) +def _hypercube_ctor(self, nodes=0): + """ + Constructs a hypercube topology: + + USAGE: topology.hypercube(nodes = 0) - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() hypercube._orig_init = hypercube.__init__ hypercube.__init__ = _hypercube_ctor -def _one_way_ring_ctor(self, nodes = 0): - """ - Constructs a one_way_ring topology: - USAGE: topology.one_way_ring(nodes = 0) +def _one_way_ring_ctor(self, nodes=0): + """ + Constructs a one_way_ring topology: - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + USAGE: topology.one_way_ring(nodes = 0) + + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() one_way_ring._orig_init = one_way_ring.__init__ one_way_ring.__init__ = _one_way_ring_ctor -def _pan_ctor(self, nodes = 0): - """ - Constructs a pan topology: this is, essentially, a ring with a sink node. This topology was 'invented' to support a local - optimization algorithm in the sink node - USAGE: topology.pan(nodes = 0) +def _pan_ctor(self, nodes=0): + """ + Constructs a pan topology: this is, essentially, a ring with a sink node. 
This topology was 'invented' to support a local + optimization algorithm in the sink node + + USAGE: topology.pan(nodes = 0) - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() pan._orig_init = pan.__init__ pan.__init__ = _pan_ctor -def _rim_ctor(self, nodes = 0): - """ - Constructs a rim topology: this is, essentially, a ring + one node that connects all ring nodes bi-directionally - USAGE: topology.rim(nodes = 0) +def _rim_ctor(self, nodes=0): + """ + Constructs a rim topology: this is, essentially, a ring + one node that connects all ring nodes bi-directionally + + USAGE: topology.rim(nodes = 0) - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() rim._orig_init = rim.__init__ rim.__init__ = _rim_ctor -def _ring_ctor(self, nodes = 0): - """ - Constructs a ring topology - USAGE: topology.ring(nodes = 0) +def _ring_ctor(self, nodes=0): + """ + Constructs a ring topology - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + USAGE: topology.ring(nodes = 0) + + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() ring._orig_init = ring.__init__ ring.__init__ = _ring_ctor -def _unconnected_ctor(self, nodes = 0): - """ - Constructs an unconnected topology - USAGE: topology.unconnected(nodes = 0) +def _unconnected_ctor(self, nodes=0): + """ + Constructs an unconnected topology + + USAGE: topology.unconnected(nodes = 0) - * nodes: number of nodes - """ - # We set the defaults or the kwargs - arg_list=[] - self._orig_init(*arg_list) - for i in range(nodes): - self.push_back() + * nodes: number of nodes + """ + # We set the defaults or the kwargs + arg_list = [] + self._orig_init(*arg_list) + for i in range(nodes): + self.push_back() unconnected._orig_init = unconnected.__init__ unconnected.__init__ = _unconnected_ctor diff --git a/PyGMO/util/__init__.py b/PyGMO/util/__init__.py index 574c7f56..f2938f5b 100644 --- a/PyGMO/util/__init__.py +++ b/PyGMO/util/__init__.py @@ -1,479 +1,560 @@ # -*- coding: utf-8 -*- -from _util import * -from _util.hv_algorithm import hv2d, hv3d, hv4d, wfg, bf_approx, bf_fpras, hoy -from ..core._core import population +from PyGMO.util._util import * +from PyGMO.util._util.hv_algorithm import hv2d, hv3d, hv4d, wfg, bf_approx, bf_fpras, hoy, fpl +from PyGMO.core._core import population __all__ = ['hypervolume', 'hv_algorithm'] hv_algorithm.__doc__ = """Module containing available algorithms for the hypervolume computation - USAGE: - hv_algorithm.hv2d() - hv_algorithm.hv3d() - hv_algorithm.hv4d() - hv_algorithm.wfg() - hv_algorithm.bf_approx() - hv_algorithm.bf_fpras() - hv_algorithm.hoy() + USAGE: + hv_algorithm.hv2d() + hv_algorithm.hv3d() + hv_algorithm.hv4d() + hv_algorithm.wfg() + hv_algorithm.bf_approx() + hv_algorithm.bf_fpras() + hv_algorithm.hoy() + hv_algorithm.fpl() """ + class HypervolumeValidation: - """ - Utility class containing 
commonly raised errors. - Kept in once place to simplify the consistency of error messages across methods - """ - - # Raised when the reference point type is not a list or a tuple, e.g. r = "Foo" - err_rp_type = TypeError("Reference point must be a list/tuple of real numbers, e.g.: r = [1.0, 1.0, 1.0]") - - # Raised when the reference point is a tuple/list but the items are non-castable to float, e.g. r = [1.0, 2.0, 'foo'] - err_rp_items_type = TypeError("Every item in reference point list/tuple must be castable to float, e.g.: r = [1, '2.5', 10e-4]") - - # Raised when the user does not provide a reference point (mandatory in for every method) - err_rp_none = TypeError("Reference point (keyword argument 'r') is mandatory") - - # Raised when the user provides something weird as a hv_algorithm, e.g. hv.compute(r=refp, hv_algorithm="A string") - err_hv_type = TypeError("Hypervolume algorithm must be an instance of a correct type, e.g.: algo = hv_algorithm.wfg()") - - # Raised when the hypervolume object is constructed by anything other than a population object, tuple or a list, e.g. hypervolume("foo bar"), hypervolume([[1,2],[2,"foo"]]) etc. - err_hv_ctor_type = TypeError("Hypervolume object must be constructed from a list/tuple of points or a population object") - - # Raised when the hypervolume object is constructed with an incorrect keyword argument - err_hv_ctor_args = TypeError("Hypervolume takes either exactly one unnamed argument or one keyword argument 'data_src' in the constructor") - - # types of hypervolume algorithms - types_hv_algo = (hv2d, hv3d, hv4d, wfg, bf_approx, bf_fpras, hoy) - - # allowed types for the refernce point - types_rp = (list, tuple,) - - @classmethod - def handle_refpoint(cls, hypvol, r): - """ - Common way of validation for the reference point being passed as parameter to 'compute', 'exclusive' and 'least_contributor' methods. - 1. Check if user provided the reference point (mandatory) - 2. Make sure that the reference point is of correct type - 3. Make sure that items of the reference point vector are castable to float - """ - if r: - if not any(isinstance(r, T) for T in cls.types_rp): - raise cls.err_rp_type - try: - r = [float(ri) for ri in r] - except ValueError: - raise cls.err_rp_items_type - else: - raise cls.err_rp_none - return r - - @classmethod - def validate_hv_algorithm(cls, algorithm): - """ - Common way of validation for the hv_algorithm object being passed as parameter to 'compute', 'exclusive' and 'least_contributor' methods, as well as the SMS-EMOA algorithm. - """ - if not any(isinstance(algorithm, T) for T in cls.types_hv_algo): - raise cls.err_hv_type - return algorithm - - -def _hypervolume_ctor(self, data_src = None, verify = True, *args, **kwargs): - """ - Constructs a hypervolume object used for the computation of hypervolue and exclusive hypervolume. - - Object can be constructed from the population object, or from a fixed list/tuple of points - Points within a fixed list must all be of equal size, of dimension larger than 1. 
- - USAGE: - from PyGMO import * - from PyGMO.util import * - hv = hypervolume(pop) # Constructs the hypervolume object from individual's fitness vectors - hv = hypervolume([[1,1,2],[2,1,2],[2,2,3]]) - hv = hypervolume(((1,2), (3,0.5), (1.5, 1.5)) - hv = hypervolume(data_src = ((1,2),(2,3)), verify=False) - """ - if not data_src or len(args) > 0 or len(kwargs) > 0: - raise HypervolumeValidation.err_hv_ctor_args - - allowed_types = (population, list, tuple,) - if not any(isinstance(data_src, T) for T in allowed_types): - raise HypervolumeValidation.err_hv_ctor_type - - args = [] - args.append(data_src) - args.append(verify) - try: - return self._original_init(*args) - except TypeError: - raise HypervolumeValidation.err_hv_ctor_type + + """ + Utility class containing commonly raised errors. + Kept in one place to keep error messages consistent across methods + """ + + # Raised when the reference point type is not a list or a tuple, e.g. r = + # "Foo" + err_rp_type = TypeError( + "Reference point must be a list/tuple of real numbers, e.g.: r = [1.0, 1.0, 1.0]") + + # Raised when the reference point is a tuple/list but the items are + # non-castable to float, e.g. r = [1.0, 2.0, 'foo'] + err_rp_items_type = TypeError( + "Every item in reference point list/tuple must be castable to float, e.g.: r = [1, '2.5', 10e-4]") + + # Raised when the user does not provide a reference point (mandatory + # for every method) + err_rp_none = TypeError( + "Reference point (keyword argument 'r') is mandatory") + + # Raised when the user provides something weird as a hv_algorithm, e.g. + # hv.compute(r=refp, hv_algorithm="A string") + err_hv_type = TypeError( + "Hypervolume algorithm must be an instance of a correct type, e.g.: algo = hv_algorithm.wfg()") + + # Raised when the hypervolume object is constructed by anything other than + # a population object, tuple or a list, e.g. hypervolume("foo bar"), + # hypervolume([[1,2],[2,"foo"]]) etc. + err_hv_ctor_type = TypeError( + "Hypervolume object must be constructed from a list/tuple of points or a population object") + + # Raised when the hypervolume object is constructed with an incorrect + # keyword argument + err_hv_ctor_args = TypeError( + "Hypervolume takes either exactly one unnamed argument or one keyword argument 'data_src' in the constructor") + + # types of hypervolume algorithms + types_hv_algo = (hv2d, hv3d, hv4d, wfg, bf_approx, bf_fpras, hoy, fpl) + + # allowed types for the reference point + types_rp = (list, tuple,) + + @classmethod + def handle_refpoint(cls, hypvol, r): + """ + Common way of validation for the reference point being passed as parameter to 'compute', 'exclusive' and 'least_contributor' methods. + 1. Check if user provided the reference point (mandatory) + 2. Make sure that the reference point is of correct type + 3. Make sure that items of the reference point vector are castable to float + """ + if r: + if not any(isinstance(r, T) for T in cls.types_rp): + raise cls.err_rp_type + try: + r = [float(ri) for ri in r] + except ValueError: + raise cls.err_rp_items_type + else: + raise cls.err_rp_none + return r + + @classmethod + def validate_hv_algorithm(cls, algorithm): + """ + Common way of validation for the hv_algorithm object being passed as parameter to 'compute', 'exclusive' and 'least_contributor' methods, as well as the SMS-EMOA algorithm. 
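Both validators are shared by every wrapper below; a behavior sketch (the standalone calls are hypothetical, in the codebase they are invoked from the hypervolume methods):

    # Behavior sketch for the validators (standalone calls are hypothetical).
    r = HypervolumeValidation.handle_refpoint(None, (1, '2.5', 10e-4))
    # r is now [1.0, 2.5, 0.001]: every item is cast to float
    # HypervolumeValidation.handle_refpoint(None, "foo")  -> raises err_rp_type
    # HypervolumeValidation.handle_refpoint(None, None)   -> raises err_rp_none
    # HypervolumeValidation.validate_hv_algorithm("wfg")  -> raises err_hv_type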
+ """ + if not any(isinstance(algorithm, T) for T in cls.types_hv_algo): + raise cls.err_hv_type + return algorithm + + +def _hypervolume_ctor(self, data_src=None, verify=True, *args, **kwargs): + """ + Constructs a hypervolume object used for the computation of hypervolume and exclusive hypervolume. + + Object can be constructed from a population object, or from a fixed list/tuple of points + Points within a fixed list must all be of equal size, of dimension larger than 1. + + USAGE: + from PyGMO import * + from PyGMO.util import * + hv = hypervolume(pop) # Constructs the hypervolume object from individuals' fitness vectors + hv = hypervolume([[1,1,2],[2,1,2],[2,2,3]]) + hv = hypervolume(((1,2), (3,0.5), (1.5, 1.5))) + hv = hypervolume(data_src = ((1,2),(2,3)), verify=False) + """ + if not data_src or len(args) > 0 or len(kwargs) > 0: + raise HypervolumeValidation.err_hv_ctor_args + + allowed_types = (population, list, tuple,) + if not any(isinstance(data_src, T) for T in allowed_types): + raise HypervolumeValidation.err_hv_ctor_type + + args = [] + args.append(data_src) + args.append(verify) + try: + return self._original_init(*args) + except TypeError: + raise HypervolumeValidation.err_hv_ctor_type hypervolume._original_init = hypervolume.__init__ hypervolume.__init__ = _hypervolume_ctor -def _hypervolume_compute(self, r = None, algorithm = None, *args, **kwargs): - """ - Compute the hypervolume indicator for a given reference point, using the provided hypervolume algorithm. - Type 'hv_algorithm?' for a list of available hypervolume algorithms. - - USAGE: - hv.compute(r=[5.0]*2) - hv.compute(r=[5.0]*2, algorithm = hv_algorithm.hv2d()) - * r - reference point used for computation - * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default. - """ - if len(args) > 0 or len(kwargs) > 0: - raise TypeError("Incorrect combination of args/kwargs, type 'hypervolume.compute?' for usage") - - r = HypervolumeValidation.handle_refpoint(self, r) - args = [] - args.append(r) - if algorithm: - algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) - args.append(algorithm) - return self._original_compute(*args) + +def _hypervolume_compute(self, r=None, algorithm=None, *args, **kwargs): + """ + Compute the hypervolume indicator for a given reference point, using the provided hypervolume algorithm. + Type 'hv_algorithm?' for a list of available hypervolume algorithms. + + USAGE: + hv.compute(r=[5.0]*2) + hv.compute(r=[5.0]*2, algorithm = hv_algorithm.hv2d()) + * r - reference point used for computation + * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default. + """ + if len(args) > 0 or len(kwargs) > 0: + raise TypeError( + "Incorrect combination of args/kwargs, type 'hypervolume.compute?' for usage") + + r = HypervolumeValidation.handle_refpoint(self, r) + args = [] + args.append(r) + if algorithm: + algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) + args.append(algorithm) + return self._original_compute(*args) hypervolume._original_compute = hypervolume.compute hypervolume.compute = _hypervolume_compute -def _hypervolume_exclusive(self, p_idx = None, r = None, algorithm = None, *args, **kwargs): - """ - Compute the exlusive contribution to the total hypervolume by the point at index p_idx, given a reference point and the provided hypervolume algorithm. - Type 'hv_algorithm?' 
for a list of available hypervolume algorithms. - - USAGE: - hv.exclusive(p_idx=0, r=[5.0]*2) - hv.exclusive(p_idx=0, r=[5.0]*2, algorithm=hv_algorithm.hv2d()) - * p_idx - index of the point - * r - reference point used for computation - * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default - """ - if p_idx == None: - raise TypeError("p_idx (non-negative integer) argument is required for computation, type 'hypervolume.exclusive?' for usage.") - if len(args) > 0 or len(kwargs) > 0: - raise TypeError("Incorrect combination of args/kwargs, type 'hypervolume.exclusive?' for usage.") - - if not isinstance(p_idx, int) or p_idx < 0: - raise TypeError("individual index (p_idx) must be a non-negative integer") - - r = HypervolumeValidation.handle_refpoint(self, r) - - args = [] - args.append(p_idx) - args.append(r) - if algorithm: - algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) - args.append(algorithm) - return self._original_exclusive(*args) + +def _hypervolume_exclusive( + self, + p_idx=None, + r=None, + algorithm=None, + *args, + **kwargs): + """ + Compute the exclusive contribution to the total hypervolume by the point at index p_idx, given a reference point and the provided hypervolume algorithm. + Type 'hv_algorithm?' for a list of available hypervolume algorithms. + + USAGE: + hv.exclusive(p_idx=0, r=[5.0]*2) + hv.exclusive(p_idx=0, r=[5.0]*2, algorithm=hv_algorithm.hv2d()) + * p_idx - index of the point + * r - reference point used for computation + * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default + """ + if p_idx is None: + raise TypeError( + "p_idx (non-negative integer) argument is required for computation, type 'hypervolume.exclusive?' for usage.") + if len(args) > 0 or len(kwargs) > 0: + raise TypeError( + "Incorrect combination of args/kwargs, type 'hypervolume.exclusive?' for usage.") + + if not isinstance(p_idx, int) or p_idx < 0: + raise TypeError( + "individual index (p_idx) must be a non-negative integer") + + r = HypervolumeValidation.handle_refpoint(self, r) + + args = [] + args.append(p_idx) + args.append(r) + if algorithm: + algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) + args.append(algorithm) + return self._original_exclusive(*args) hypervolume._original_exclusive = hypervolume.exclusive hypervolume.exclusive = _hypervolume_exclusive -def _hypervolume_least_contributor(self, r = None, algorithm = None, *args, **kwargs): - """ - Find the least contributing point among the pareto front approximation. - Type 'hv_algorithm?' for a list of available hypervolume algorithms. - - USAGE: - hv.least_contributor(r=[5.0]*3) - hv.least_contributor(r=[5.0]*3, algorithm=hv_algorithm.hv3d()) - * r - reference point used for computation - * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default - """ - - if len(args) > 0 or len(kwargs) > 0: - raise TypeError("Incorrect combination of args/kwargs, type 'hypervolume.least_contributor?' 
for usage") - r = HypervolumeValidation.handle_refpoint(self, r) - args = [] - args.append(r) - if algorithm: - algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) - args.append(algorithm) - return self._original_least_contributor(*args) + +def _hypervolume_least_contributor( + self, + r=None, + algorithm=None, + *args, + **kwargs): + """ + Find the least contributing point among the pareto front approximation. + Type 'hv_algorithm?' for a list of available hypervolume algorithms. + + USAGE: + hv.least_contributor(r=[5.0]*3) + hv.least_contributor(r=[5.0]*3, algorithm=hv_algorithm.hv3d()) + * r - reference point used for computation + * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default + """ + + if len(args) > 0 or len(kwargs) > 0: + raise TypeError( + "Incorrect combination of args/kwargs, type 'hypervolume.least_contributor?' for usage") + r = HypervolumeValidation.handle_refpoint(self, r) + args = [] + args.append(r) + if algorithm: + algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) + args.append(algorithm) + return self._original_least_contributor(*args) hypervolume._original_least_contributor = hypervolume.least_contributor hypervolume.least_contributor = _hypervolume_least_contributor -def _hypervolume_greatest_contributor(self, r = None, algorithm = None, *args, **kwargs): - """ - Find the least contributing point among the pareto front approximation. - Type 'hv_algorithm?' for a list of available hypervolume algorithms. - - USAGE: - hv.greatest_contributor(r=[5.0]*3) - hv.greatest_contributor(r=[5.0]*3, algorithm=hv_algorithm.hv3d()) - * r - reference point used for computation - * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default - """ - - if len(args) > 0 or len(kwargs) > 0: - raise TypeError("Incorrect combination of args/kwargs, type 'hypervolume.greatest_contributor?' for usage") - r = HypervolumeValidation.handle_refpoint(self, r) - args = [] - args.append(r) - if algorithm: - algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) - args.append(algorithm) - return self._original_greatest_contributor(*args) + +def _hypervolume_greatest_contributor( + self, + r=None, + algorithm=None, + *args, + **kwargs): + """ + Find the greatest contributing point among the pareto front approximation. + Type 'hv_algorithm?' for a list of available hypervolume algorithms. + + USAGE: + hv.greatest_contributor(r=[5.0]*3) + hv.greatest_contributor(r=[5.0]*3, algorithm=hv_algorithm.hv3d()) + * r - reference point used for computation + * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default + """ + + if len(args) > 0 or len(kwargs) > 0: + raise TypeError( + "Incorrect combination of args/kwargs, type 'hypervolume.greatest_contributor?' for usage") + r = HypervolumeValidation.handle_refpoint(self, r) + args = [] + args.append(r) + if algorithm: + algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) + args.append(algorithm) + return self._original_greatest_contributor(*args) hypervolume._original_greatest_contributor = hypervolume.greatest_contributor hypervolume.greatest_contributor = _hypervolume_greatest_contributor -def _hypervolume_contributions(self, r = None, algorithm = None, *args, **kwargs): - """ - Find the contributions to the hypervolume by each point. - Type 'hv_algorithm?' 
for a list of available hypervolume algorithms. - - USAGE: - hv.contributions(r=[5.0]*3) - hv.contributions(r=[5.0]*3, algorithm=hv_algorithm.hv3d()) - * r - reference point used for computation - * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default - """ - - if len(args) > 0 or len(kwargs) > 0: - raise TypeError("Incorrect combination of args/kwargs, type 'hypervolume.contributions?' for usage") - r = HypervolumeValidation.handle_refpoint(self, r) - args = [] - args.append(r) - if algorithm: - algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) - args.append(algorithm) - return self._original_contributions(*args) + +def _hypervolume_contributions(self, r=None, algorithm=None, *args, **kwargs): + """ + Find the contributions to the hypervolume by each point. + Type 'hv_algorithm?' for a list of available hypervolume algorithms. + + USAGE: + hv.contributions(r=[5.0]*3) + hv.contributions(r=[5.0]*3, algorithm=hv_algorithm.hv3d()) + * r - reference point used for computation + * algorithm (optional) - hypervolume algorithm used for the computation, uses the best performing algorithm for given dimension by default + """ + + if len(args) > 0 or len(kwargs) > 0: + raise TypeError( + "Incorrect combination of args/kwargs, type 'hypervolume.contributions?' for usage") + r = HypervolumeValidation.handle_refpoint(self, r) + args = [] + args.append(r) + if algorithm: + algorithm = HypervolumeValidation.validate_hv_algorithm(algorithm) + args.append(algorithm) + return self._original_contributions(*args) hypervolume._original_contributions = hypervolume.contributions hypervolume.contributions = _hypervolume_contributions -def _hypervolume_get_nadir_point(self, eps = 0.0): - """ - Return Nadir point for given set of points. - USAGE: - hv.nadir_point(eps = 10.0) - * eps (optional) - value added to every objective in order to assert a strong dominance of reference point (1.0 by default). - """ - try: - eps = float(eps) - except ValueError: - raise TypeError("Epsilon must be castable to float.") +def _hypervolume_get_nadir_point(self, eps=0.0): + """ + Return the nadir point for the given set of points. - if eps < 0.0: - raise ValueError("Epsilon must be a positive value.") + USAGE: + hv.nadir_point(eps = 10.0) + * eps (optional) - value added to every objective in order to assert a strong dominance of reference point (0.0 by default). + """ + try: + eps = float(eps) + except ValueError: + raise TypeError("Epsilon must be castable to float.") - return self._original_get_nadir_point(eps) + if eps < 0.0: + raise ValueError("Epsilon must be a non-negative value.") + + return self._original_get_nadir_point(eps) hypervolume._original_get_nadir_point = hypervolume.get_nadir_point hypervolume.get_nadir_point = _hypervolume_get_nadir_point + def _hypervolume_set_copy_points(self, b): - """ - Determine whether the hypervolume object should make a copy of points before doing the computation. - In most cases, only the first computation behaves as expected. - Used in cases when the hypervolume object is to be used a 'single use' instance only. + """ + Determine whether the hypervolume object should make a copy of points before doing the computation. + When copying is disabled, only the first computation is guaranteed to behave as expected. + Used in cases when the hypervolume object is to be used as a 'single use' instance only. 
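The copy/verify switches exist for a 'single use' pattern; a sketch combining set_copy_points with set_verify (defined just below):

    # 'Single use' sketch: skip copying and verification when the object is
    # computed from once and then discarded (later calls may see altered points).
    from PyGMO.util import hypervolume
    hv = hypervolume([[2, 3], [3, 2]])
    hv.set_copy_points(False)
    hv.set_verify(False)
    print(hv.compute(r=[5.0, 5.0]))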
- - USAGE: - hv.set_copy_points(True) - """ - if not isinstance(b, bool): - raise TypeError("Argument must be of type 'bool'") + USAGE: + hv.set_copy_points(True) + """ + if not isinstance(b, bool): + raise TypeError("Argument must be of type 'bool'") - return self._original_set_copy_points(b) + return self._original_set_copy_points(b) hypervolume._original_set_copy_points = hypervolume.set_copy_points hypervolume.set_copy_points = _hypervolume_set_copy_points + def _hypervolume_set_verify(self, b): - """ - Determines whether the hypervolume object should verify whether the set of points and the reference point meet certain conditions. + """ + Determines whether the hypervolume object should verify whether the set of points and the reference point meet certain conditions. - USAGE: - hv.set_verify(True) - """ - if not isinstance(b, bool): - raise TypeError("Argument must be of type 'bool'") + USAGE: + hv.set_verify(True) + """ + if not isinstance(b, bool): + raise TypeError("Argument must be of type 'bool'") - return self._original_set_verify(b) + return self._original_set_verify(b) hypervolume._original_set_verify = hypervolume.set_verify hypervolume.set_verify = _hypervolume_set_verify + def _hv2d_ctor(self): - """ - Hypervolume algorithm: hv2d. - Computational complexity: O(n*logn) - Applicable to hypervolume computation problems of dimension=2 - - USAGE: - hv = hypervolume(...) # see 'hypervolume?' for usage - refpoint=[1.0]*2 - hv.compute(r=refpoint, algorithm=hv_algorithm.hv2d()) - hv.exclusive(p_idx=13, refpoint, algorithm=hv_algorithm.hv2d()) - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hv2d()) - """ - return self._original_init() + """ + Hypervolume algorithm: hv2d. + Computational complexity: O(n*logn) + Applicable to hypervolume computation problems of dimension=2 + + USAGE: + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint=[1.0]*2 + hv.compute(r=refpoint, algorithm=hv_algorithm.hv2d()) + hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hv2d()) + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hv2d()) + """ + return self._original_init() hv2d._original_init = hv2d.__init__ hv2d.__init__ = _hv2d_ctor + def _hv3d_ctor(self): - """ - Hypervolume algorithm: hv3d. - This class contains the implementation of efficient hypervolume algorithms for 3 dimensions. - Computational complexity: O(n*logn) - Applicable to hypervolume computation problems of dimension=3 - - REF: "On the Complexity of Computing the Hypervolume Indicator", Nicola Beume, Carlos M. Fonseca, Manuel Lopez-Ibanez, - Luis Paquete, Jan Vahrenhold. IEEE TRANSACTIONS ON EVOLUTIONARY COMPUTTATION. VOL. 13, NO. 5, OCTOBER 2009 - - REF: "Computing hypervolume contribution in low dimensions: asymptotically optimal algorithm and complexity results", Michael T. M. Emmerich, Carlos M. Fonseca - - USAGE: - hv = hypervolume(...) # see 'hypervolume?' for usage - refpoint = [1.0]*3 - hv.compute(r=refpoint, algorithm=hv_algorithm.hv3d()) - hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hv3d()) - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hv3d()) - """ - return self._original_init() + """ + Hypervolume algorithm: hv3d. + This class contains the implementation of efficient hypervolume algorithms for 3 dimensions. + Computational complexity: O(n*logn) + Applicable to hypervolume computation problems of dimension=3 + + REF: "On the Complexity of Computing the Hypervolume Indicator", Nicola Beume, Carlos M. Fonseca, Manuel Lopez-Ibanez, + Luis Paquete, Jan Vahrenhold. 
IEEE TRANSACTIONS ON EVOLUTIONARY COMPUTATION. VOL. 13, NO. 5, OCTOBER 2009 + + REF: "Computing hypervolume contribution in low dimensions: asymptotically optimal algorithm and complexity results", Michael T. M. Emmerich, Carlos M. Fonseca + + USAGE: + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint = [1.0]*3 + hv.compute(r=refpoint, algorithm=hv_algorithm.hv3d()) + hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hv3d()) + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hv3d()) + """ + return self._original_init() hv3d._original_init = hv3d.__init__ hv3d.__init__ = _hv3d_ctor + def _hv4d_ctor(self): - """ - Hypervolume algorithm: HV4d. - Computational complexity: O(n^2) - Applicable to hypervolume computation problems of dimension=4 - - REF: Andreia P. Guerreiro, Carlos M. Fonseca, Michael T. Emmerich, "A Fast Dimension-Sweep Algorithm for the Hypervolume Indicator in Four Dimensions", - CCCG 2012, Charlottetown, P.E.I., August 8–10, 2012. - - USAGE: - hv = hypervolume(...) # see 'hypervolume?' for usage - refpoint = [1.0]*4 - hv.compute(r=refpoint, algorithm=hv_algorithm.hv4d()) - hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hv4d()) - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hv4d()) - """ - return self._original_init() + """ + Hypervolume algorithm: HV4d. + Computational complexity: O(n^2) + Applicable to hypervolume computation problems of dimension=4 + + REF: Andreia P. Guerreiro, Carlos M. Fonseca, Michael T. Emmerich, "A Fast Dimension-Sweep Algorithm for the Hypervolume Indicator in Four Dimensions", + CCCG 2012, Charlottetown, P.E.I., August 8–10, 2012. + + USAGE: + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint = [1.0]*4 + hv.compute(r=refpoint, algorithm=hv_algorithm.hv4d()) + hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hv4d()) + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hv4d()) + """ + return self._original_init() hv4d._original_init = hv4d.__init__ hv4d.__init__ = _hv4d_ctor + +def _fpl_ctor(self): + """ + Hypervolume algorithm: FPL. + Computational complexity: O(n ^ (d - 2) * log(n)) + Applicable to hypervolume computation problems of arbitrary dimension. + + USAGE: + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint = [1.0]*5 + hv.compute(r=refpoint, algorithm=hv_algorithm.fpl()) + hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.fpl()) + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.fpl()) + """ + return self._original_init() +fpl._original_init = fpl.__init__ +fpl.__init__ = _fpl_ctor + + def _hoy_ctor(self): - """ - Hypervolume algorithm: HOY. - Computational complexity: O(n*log(n) + n^(d/2)) - Applicable to hypervolume computation problems of dimension in [2, ..] - - - REF: Nicola Beume and Guenter Rudolph, "Faster S-Metric Calculation by Considering Dominated Hypervolume as Klee's Measure Problem.", - In: B. Kovalerchuk (ed.): Proceedings of the Second IASTED Conference on Computational Intelligence (CI 2006), pp. 231-236. ACTA Press: Anaheim, 2006. - - USAGE: - hv = hypervolume(...) # see 'hypervolume?' for usage - refpoint = [1.0]*5 - hv.compute(r=refpoint, algorithm=hv_algorithm.hoy()) - hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hoy()) - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hoy()) - """ - return self._original_init() + """ + Hypervolume algorithm: HOY. + Computational complexity: O(n * log(n) + n ^ (d / 2)) + Applicable to hypervolume computation problems of dimension in [2, ..] 
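With fpl() added, the exact algorithms cover hv2d (d=2), hv3d (d=3), hv4d (d=4), and fpl/wfg/hoy for any d >= 2. A selection sketch (the 5-objective front and reference point are illustrative):

    # Dimension-driven algorithm choice (sketch; data is illustrative).
    from PyGMO.util import hypervolume, hv_algorithm
    front = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [2, 2, 4, 4, 2]]
    hv = hypervolume(front)
    ref = [6.0] * 5
    print(hv.compute(r=ref, algorithm=hv_algorithm.fpl()))  # exact, any dimension
    print(hv.compute(r=ref, algorithm=hv_algorithm.wfg()))  # exact, d >= 2
    print(hv.compute(r=ref, algorithm=hv_algorithm.hoy()))  # exact, d >= 2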
+ + + REF: Nicola Beume and Guenter Rudolph, "Faster S-Metric Calculation by Considering Dominated Hypervolume as Klee's Measure Problem.", + In: B. Kovalerchuk (ed.): Proceedings of the Second IASTED Conference on Computational Intelligence (CI 2006), pp. 231-236. ACTA Press: Anaheim, 2006. + + USAGE: + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint = [1.0]*5 + hv.compute(r=refpoint, algorithm=hv_algorithm.hoy()) + hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.hoy()) + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.hoy()) + """ + return self._original_init() hoy._original_init = hoy.__init__ hoy.__init__ = _hoy_ctor -def _wfg_ctor(self, stop_dimension = 2): - """ - Hypervolume algorithm: WFG. - Applicable to hypervolume computation problems of dimension in [2, ..] - - REF: "A Fast Way of Calculating Exact Hypervolumes", Lyndon While, Lucas Bradstreet, Luigi Barone. - IEEE TRANSACXTIONS ON EVOLUTIONARY COMPUTATION, VOL. 16, NO. 1, FEBRURARY 2012 - - USAGE: - hv = hypervolume(...) # see 'hypervolume?' for usage - refpoint = [1.0]*7 - hv.compute(r=refpoint, algorithm=hv_algorithm.wfg()) - hv.exclusive(p_idx=13,r=refpoint, algorithm=hv_algorithm.wfg()) - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.wfg()) - """ - args = [] - args.append(stop_dimension) - return self._original_init(*args) + +def _wfg_ctor(self, stop_dimension=2): + """ + Hypervolume algorithm: WFG. + Applicable to hypervolume computation problems of dimension in [2, ..] + + REF: "A Fast Way of Calculating Exact Hypervolumes", Lyndon While, Lucas Bradstreet, Luigi Barone. + IEEE TRANSACTIONS ON EVOLUTIONARY COMPUTATION, VOL. 16, NO. 1, FEBRUARY 2012 + + USAGE: + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint = [1.0]*7 + hv.compute(r=refpoint, algorithm=hv_algorithm.wfg()) + hv.exclusive(p_idx=13, r=refpoint, algorithm=hv_algorithm.wfg()) + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.wfg()) + """ + args = [] + args.append(stop_dimension) + return self._original_init(*args) wfg._original_init = wfg.__init__ wfg.__init__ = _wfg_ctor -def _bf_approx_ctor(self, use_exact = True, trivial_subcase_size = 1, eps = 1e-1, delta = 1e-4, gamma = 0.25, delta_multiplier = 0.775, initial_delta_coeff = 1e-1, alpha = 0.2): - """ - Hypervolume algorithm: Bringmann-Friedrich approximation. - - Default values for the parameters of the algorithm were obtained from the shark implementation of the algorithm: - http://image.diku.dk/shark/doxygen_pages/html/_least_contributor_approximator_8hpp_source.html - - REF: "Approximating the least hypervolume contributor: NP-hard in general, but fast in practice", Karl Bringmann, Tobias Friedrich. - - USAGE: - * use_exact - should bf_approx use exact methods for computation - * trivial_subcase_size - when the number of points overlapping the bounding box is smaller or equal to that argument, we compute the exlusive hypervolume exactly - * eps - accuracy of approximation - * delta - confidence of approximation - * gamma - constant used for computation of delta for each of the points during the sampling - * delta_multiplier - factor with which delta diminishes each round - * initial_delta_coeff - initial coefficient multiplied by the delta at round 0 - * alpha - coefficicient stating how accurately current lowest contributor should be sampled - hv = hypervolume(...) # see 'hypervolume?' 
for usage - refpoint = [1.0]*7 - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.bf_approx()) - """ - - args = [] - args.append(use_exact) - args.append(trivial_subcase_size) - args.append(eps) - args.append(delta) - args.append(delta_multiplier) - args.append(alpha) - args.append(initial_delta_coeff) - args.append(gamma) - return self._original_init(*args) + +def _bf_approx_ctor( + self, + use_exact=True, + trivial_subcase_size=1, + eps=1e-2, + delta=1e-6, + gamma=0.25, + delta_multiplier=0.775, + initial_delta_coeff=0.1, + alpha=0.2): + """ + Hypervolume algorithm: Bringmann-Friedrich approximation. + + Default values for the parameters of the algorithm were obtained from the shark implementation of the algorithm: + http://image.diku.dk/shark/doxygen_pages/html/_least_contributor_approximator_8hpp_source.html + + REF: "Approximating the least hypervolume contributor: NP-hard in general, but fast in practice", Karl Bringmann, Tobias Friedrich. + + USAGE: + * use_exact - should bf_approx use exact methods for computation + * trivial_subcase_size - when the number of points overlapping the bounding box is smaller or equal to that argument, we compute the exclusive hypervolume exactly + * eps - accuracy of approximation + * delta - confidence of approximation + * gamma - constant used for computation of delta for each of the points during the sampling + * delta_multiplier - factor with which delta diminishes each round + * initial_delta_coeff - initial coefficient multiplied by the delta at round 0 + * alpha - coefficient stating how accurately the current lowest contributor should be sampled + hv = hypervolume(...) # see 'hypervolume?' 
for usage + refpoint = [1.0]*7 + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.bf_approx()) + """ + + args = [] + args.append(use_exact) + args.append(trivial_subcase_size) + args.append(eps) + args.append(delta) + args.append(delta_multiplier) + args.append(alpha) + args.append(initial_delta_coeff) + args.append(gamma) + return self._original_init(*args) bf_approx._original_init = bf_approx.__init__ bf_approx.__init__ = _bf_approx_ctor -def _bf_fpras_ctor(self, eps = 1e-2, delta = 1e-2): - """ - Hypervolume algorithm: Bringmann-Friedrich approximation. - - Default values for the parameters of the algorithm were obtained from the shark implementation of the algorithm: - http://image.diku.dk/shark/doxygen_pages/html/_least_contributor_approximator_8hpp_source.html + +def _bf_fpras_ctor(self, eps=1e-2, delta=1e-2): + """ + Hypervolume algorithm: Bringmann-Friedrich approximation. + + Default values for the parameters of the algorithm were obtained from the shark implementation of the algorithm: + http://image.diku.dk/shark/doxygen_pages/html/_least_contributor_approximator_8hpp_source.html - REF: "Approximating the volume of unions and intersections of high-dimensional geometric objects", Karl Bringmann, Tobias Friedrich. + REF: "Approximating the volume of unions and intersections of high-dimensional geometric objects", Karl Bringmann, Tobias Friedrich. - USAGE: - * eps - accuracy of approximation - * delta - confidence of approximation + USAGE: + * eps - accuracy of approximation + * delta - confidence of approximation - hv = hypervolume(...) # see 'hypervolume?' for usage - refpoint = [1.0]*7 - hv.least_contributor(r=refpoint, algorithm=hv_algorithm.bf_fpras()) - """ + hv = hypervolume(...) # see 'hypervolume?' for usage + refpoint = [1.0]*7 + hv.least_contributor(r=refpoint, algorithm=hv_algorithm.bf_fpras()) + """ - args = [] - args.append(eps) - args.append(delta) - return self._original_init(*args) + args = [] + args.append(eps) + args.append(delta) + return self._original_init(*args) bf_fpras._original_init = bf_fpras.__init__ bf_fpras.__init__ = _bf_fpras_ctor def _race_pop_ctor(self, population=None, seed=0): - """ - Constructs a racing object responsible for racing individuals in a population - - USAGE: race_pop(population, seed=0) - - * population: The population containint the individuals to be raced - * seed: Seed of the racing object - - """ - # We set the defaults or the kwargs - arg_list=[] - if(population != None): - arg_list.append(population) - arg_list.append(seed) - self._orig_init(*arg_list) + """ + Constructs a racing object responsible for racing individuals in a population + + USAGE: race_pop(population, seed=0) + + * population: The population containing the individuals to be raced + * seed: Seed of the racing object + + """ + # We set the defaults or the kwargs + arg_list = [] + if(population is not None): + arg_list.append(population) + arg_list.append(seed) + self._orig_init(*arg_list) race_pop._orig_init = race_pop.__init__ race_pop.__init__ = _race_pop_ctor @@ -481,163 +562,187 @@ def _race_pop_ctor(self, population=None, seed=0): # enum _util.race_pop.termination_condition = _util._termination_condition -def _race_pop_run(self, n_final, min_trials=0, max_count=500, delta=0.05, racers_idx=[], term_cond=race_pop.termination_condition.MAX_BUDGET, race_best=True, screen_output=False): - """ - Start a race among the individuals - - Returns a tuple of winning indices and consumed objective function evaluation. - - USAGE: race_pop.run(n_final, min_trials=0, max_count=500, delta=0.05, racers_idx=[], term_cond=util.race_pop.termination_condition.MAX_BUDGET, race_best=True, screen_output=False): - - * n_final: The desired number of winners when race ends - * min_trials: Each individuals be evaluated at least this number of times before being dropped - * max_count: The allow number of function evaluations (MAX_BUDGET), or the maximum number of data points to be considered for each individual (MAX_DATA_COUNT) - * delta: Confidence level of the statistical test - * racers_idx: Indices of the individuals to be raced, empty means to race all individuals - * term_cond: Can be util.race_pop.termination_condition.MAX_BUDGET or util.race_pop.termination_condition.MAX_DATA_COUNT - * race_best: When True winners are the best, otherwise winners are the worst - * screen_output: Log racing stats at each iteration onto the screen - """ - # We set the defaults or the kwargs - arg_list=[] - arg_list.append(n_final) - arg_list.append(min_trials) - arg_list.append(max_count) - arg_list.append(delta) - arg_list.append(racers_idx) - arg_list.append(term_cond) - arg_list.append(race_best) - arg_list.append(screen_output) - return self._orig_run(*arg_list) + +def _race_pop_run( + self, + n_final, + min_trials=0, + max_count=500, + delta=0.05, + racers_idx=[], + term_cond=race_pop.termination_condition.MAX_BUDGET, + race_best=True, + screen_output=False): + """ + Start a race among the individuals + + Returns a tuple of winning indices and consumed objective function evaluations. 
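A racing sketch for the wrappers below (the ackley problem is illustrative; racing is intended for noisy/stochastic objectives):

    # race_pop sketch (problem choice is illustrative).
    from PyGMO import problem, population
    from PyGMO.util import race_pop
    pop = population(problem.ackley(10), 20)
    racer = race_pop(population=pop, seed=42)
    winners, fevals = racer.run(n_final=5, max_count=300)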
+ + USAGE: race_pop.run(n_final, min_trials=0, max_count=500, delta=0.05, racers_idx=[], term_cond=util.race_pop.termination_condition.MAX_BUDGET, race_best=True, screen_output=False): + + * n_final: The desired number of winners when race ends + * min_trials: Each individual is evaluated at least this number of times before being dropped + * max_count: The allowed number of function evaluations (MAX_BUDGET), or the maximum number of data points to be considered for each individual (MAX_DATA_COUNT) + * delta: Confidence level of the statistical test + * racers_idx: Indices of the individuals to be raced, empty means to race all individuals + * term_cond: Can be util.race_pop.termination_condition.MAX_BUDGET or util.race_pop.termination_condition.MAX_DATA_COUNT + * race_best: When True winners are the best, otherwise winners are the worst + * screen_output: Log racing stats at each iteration onto the screen + """ + # We set the defaults or the kwargs + arg_list = [] + arg_list.append(n_final) + arg_list.append(min_trials) + arg_list.append(max_count) + arg_list.append(delta) + arg_list.append(racers_idx) + arg_list.append(term_cond) + arg_list.append(race_best) + arg_list.append(screen_output) + return self._orig_run(*arg_list) race_pop._orig_run = race_pop.run race_pop.run = _race_pop_run + def _race_pop_size(self): - """ - Returns the number of individuals contained in the underlying population + """ + Returns the number of individuals contained in the underlying population - USAGE: race_pop.size() - """ - return self._orig_size() + USAGE: race_pop.size() + """ + return self._orig_size() race_pop._orig_size = race_pop.size race_pop.size = _race_pop_size + def _race_pop_reset_cache(self): - """ - Clears the cached fitness and constraint vectors. - """ - return self._orig_reset_cache + """ + Clears the cached fitness and constraint vectors. + """ + return self._orig_reset_cache() race_pop._orig_reset_cache = race_pop.reset_cache race_pop.reset_cache = _race_pop_reset_cache + def _race_pop_register_pop(self, pop): - """Load a population into the race environment. - - This step is required before the calling to run(), if during construction - no population was supplied. - - * pop Population to be registered. Racing will operate over this population. - """ - return self._orig_register_pop(pop) + """Load a population into the race environment. + + This step is required before calling run(), if during construction + no population was supplied. + + * pop Population to be registered. Racing will operate over this population. + """ + return self._orig_register_pop(pop) race_pop._orig_register_pop = race_pop.register_pop race_pop.register_pop = _race_pop_register_pop + def _race_pop_inherit_memory(self, race_pop_src): - """Transfer compatible evaluation history from another race_pop object. - - The source race_pop object and the current race_pop object must have the - same seed. Upon calling, the current race_pop object will inherit - evaluation history of individuals who also happen to reside in source. + """Transfer compatible evaluation history from another race_pop object. + + The source race_pop object and the current race_pop object must have the + same seed. Upon calling, the current race_pop object will inherit + evaluation history of individuals who also happen to reside in source. 
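Continuing the sketch above, the memory-transfer pattern requires matching seeds:

    # inherit_memory sketch: both racers share seed 42, so histories are compatible.
    racer2 = race_pop(population=pop, seed=42)
    racer2.inherit_memory(racer)  # racer2 reuses racer's cached evaluations
    winners2, fevals2 = racer2.run(n_final=5, max_count=300)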
+
 def _race_pop_get_mean_fitness(self, ind_list=[]):
-    """
-    Returns the average fitness value of the individuals based on the evaluation history
-
-    * ind_list: The indices of the individuals whose mean fitness vectors are to be extracted. If this is empty, mean data of all the individuals will be returned.
-    """
-    return self._orig_get_mean_fitness(ind_list)
+    """
+    Returns the average fitness value of the individuals based on the evaluation history
+
+    * ind_list: The indices of the individuals whose mean fitness vectors are to be extracted. If this is empty, the mean data of all the individuals will be returned.
+    """
+    return self._orig_get_mean_fitness(ind_list)
 race_pop._orig_get_mean_fitness = race_pop.get_mean_fitness
 race_pop.get_mean_fitness = _race_pop_get_mean_fitness

+
 def _race_pop_set_seed(self, seed):
-    """
-    Reset the seed for racing.
-
-    * seed: The new seed to be set. This automatically clears the evaluation cache.
-    """
-    return self._orig_set_seed(seed)
+    """
+    Reset the seed for racing.
+
+    * seed: The new seed to be set. This automatically clears the evaluation cache.
+    """
+    return self._orig_set_seed(seed)
 race_pop._orig_set_seed = race_pop.set_seed
 race_pop.set_seed = _race_pop_set_seed

+
 def _race_algo_ctor(self, algo_list, probs, pop_size=100, seed=0):
-    """
-    Construct the racing object responsible for racing algorithms
-
-    * algo_list: The algorithms to be raced
-    * probs: Can be a single PyGMO problem or a list of them
-    * pop_size: All the algorithms will be evolving internally some random population of this size
-    * seed: Seed of the race
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-
-    algo_vec = vector_of_algorithm_base_ptr()
-    algo_vec.extend(algo_list)
-    arg_list.append(algo_vec)
-
-    try:
-        l = len(probs);
-        prob_vec = vector_of_problem_base_ptr()
-        prob_vec.extend(probs)
-        arg_list.append(prob_vec)
-    except TypeError:
-        arg_list.append(probs)
-
-    arg_list.append(pop_size)
-    arg_list.append(seed)
-
-    self._orig_init(*arg_list)
+    """
+    Construct the racing object responsible for racing algorithms
+
+    * algo_list: The algorithms to be raced
+    * probs: Can be a single PyGMO problem or a list of them
+    * pop_size: Each algorithm will internally evolve a random population of this size
+    * seed: Seed of the race
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+
+    algo_vec = vector_of_algorithm_base_ptr()
+    algo_vec.extend(algo_list)
+    arg_list.append(algo_vec)
+
+    try:
+        # A list of problems has a length; a single problem raises TypeError
+        l = len(probs)
+        prob_vec = vector_of_problem_base_ptr()
+        prob_vec.extend(probs)
+        arg_list.append(prob_vec)
+    except TypeError:
+        arg_list.append(probs)
+
+    arg_list.append(pop_size)
+    arg_list.append(seed)
+
+    self._orig_init(*arg_list)
 race_algo._orig_init = race_algo.__init__
 race_algo.__init__ = _race_algo_ctor
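A matching sketch for the algorithm-racing constructor just wrapped above; the algorithm list, problem and seed are illustrative assumptions, and the run() wrapper used here is re-documented just below:

    # Sketch: racing three algorithms on a single problem.
    from PyGMO import algorithm, problem
    from PyGMO.util import race_algo

    algos = [algorithm.de(gen=50), algorithm.pso(gen=50), algorithm.sga(gen=50)]
    racer = race_algo(algos, problem.ackley(10), pop_size=50, seed=42)
    winners, n_evolves = racer.run(n_final=1)  # (winning indices, evolve calls)
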

-def _race_algo_run(self, n_final, min_trials=0, max_count=500, delta=0.05, racers_idx=[], race_best=True, screen_output=False):
-
-    """
-    Start a race among several algorithms
-
-    Returns a tuple of winning indices and the total number of evolve() made.
-
-    USAGE: race_algo.run(n_final, min_trials=0, max_count=500, delta=0.05, racers_idx=[], race_best=True, screen_output=False):
-
-    * n_final: The desired number of winners when race ends
-    * min_trials: Each algorithms be evaluated at least this number of times before being dropped
-    * max_count: The allow number of algorithm performance evaluation (i.e. number of calls to evolve)
-    * delta: Confidence level of the statistical test
-    * racers_idx: Indices of the algorithms to be raced, empty means to race all algorithms
-    * race_best: When True winners are the best, otherwise winners are the worst
-    * screen_output: Log racing stats at each iteration onto the screen
-    """
-    # We set the defaults or the kwargs
-    arg_list=[]
-    arg_list.append(n_final)
-    arg_list.append(min_trials)
-    arg_list.append(max_count)
-    arg_list.append(delta)
-    arg_list.append(racers_idx)
-    arg_list.append(race_best)
-    arg_list.append(screen_output)
-    return self._orig_run(*arg_list)
+
+def _race_algo_run(
+        self,
+        n_final,
+        min_trials=0,
+        max_count=500,
+        delta=0.05,
+        racers_idx=[],
+        race_best=True,
+        screen_output=False):
+    """
+    Start a race among several algorithms
+
+    Returns a tuple of the winning indices and the total number of evolve() calls made.
+
+    USAGE: race_algo.run(n_final, min_trials=0, max_count=500, delta=0.05, racers_idx=[], race_best=True, screen_output=False)
+
+    * n_final: The desired number of winners when the race ends
+    * min_trials: Each algorithm will be evaluated at least this number of times before it can be dropped
+    * max_count: The allowed number of algorithm performance evaluations (i.e. the number of calls to evolve)
+    * delta: Confidence level of the statistical test
+    * racers_idx: Indices of the algorithms to be raced; empty means race all algorithms
+    * race_best: When True the winners are the best, otherwise the winners are the worst
+    * screen_output: Log racing stats at each iteration onto the screen
+    """
+    # We set the defaults or the kwargs
+    arg_list = []
+    arg_list.append(n_final)
+    arg_list.append(min_trials)
+    arg_list.append(max_count)
+    arg_list.append(delta)
+    arg_list.append(racers_idx)
+    arg_list.append(race_best)
+    arg_list.append(screen_output)
+    return self._orig_run(*arg_list)
 race_algo._orig_run = race_algo.run
 race_algo.run = _race_algo_run
-
diff --git a/PyGMO/util/util.cpp b/PyGMO/util/util.cpp
index 22c225b5..7fe9e069 100644
--- a/PyGMO/util/util.cpp
+++ b/PyGMO/util/util.cpp
@@ -94,6 +94,7 @@ void expose_hv_algorithm()
 	algorithm_wrapper<hv_algorithm::hv2d>("hv2d","hv2d algorithm.");
 	algorithm_wrapper<hv_algorithm::hv3d>("hv3d","hv3d algorithm.");
 	algorithm_wrapper<hv_algorithm::hv4d>("hv4d","hv4d algorithm.");
+	algorithm_wrapper<hv_algorithm::fpl>("fpl","FPL algorithm.");
 	algorithm_wrapper<hv_algorithm::hoy>("hoy","HOY algorithm.");
 	class_ >("wfg","WFG algorithm.", init());
 	class_ >("bf_approx","Bringmann-Friedrich approximated algorithm.",
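The hunk above exposes the FPL hypervolume algorithm to Python alongside the existing ones. Once PyGMO is rebuilt, selecting it should follow the same pattern the bf_fpras docstring shows; the point set and reference point below are made up for illustration:

    # Sketch: computing a hypervolume with the newly exposed fpl algorithm.
    from PyGMO.util import hypervolume, hv_algorithm

    points = [[1.0, 2.0, 3.0], [2.0, 1.0, 3.0], [3.0, 2.0, 1.0]]
    hv = hypervolume(points)
    vol = hv.compute(r=[4.0, 4.0, 4.0], algorithm=hv_algorithm.fpl())
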
diff --git a/cmake_modules/PaGMOPythonSetup.cmake b/cmake_modules/PaGMOPythonSetup.cmake
index 681b1717..d722b5a6 100644
--- a/cmake_modules/PaGMOPythonSetup.cmake
+++ b/cmake_modules/PaGMOPythonSetup.cmake
@@ -20,12 +20,23 @@
 # Free Software Foundation, Inc.,
 # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

-INCLUDE(FindPythonLibs)
-# We need the Python interpreter to figure out Python's version in SuckOSX.
+# Unset previous values, to allow changing of Python version.
 INCLUDE(FindPythonInterp)
+IF(${PYTHON_VERSION_STRING} STREQUAL ${PYGMO_PYTHON_VERSION})
+ELSE(${PYTHON_VERSION_STRING} STREQUAL ${PYGMO_PYTHON_VERSION})
+	UNSET(PYTHON_EXECUTABLE CACHE)
+	UNSET(PYTHON_DEBUG_LIBRARY CACHE)
+	UNSET(PYTHON_LIBRARY CACHE)
+	UNSET(PYTHON_INCLUDE_PATH CACHE)
+	UNSET(PYTHON_INCLUDE_DIR CACHE)
+ENDIF(${PYTHON_VERSION_STRING} STREQUAL ${PYGMO_PYTHON_VERSION})
+
+# Find Python executable
+FIND_PACKAGE(PythonInterp ${PYGMO_PYTHON_VERSION} REQUIRED)
+SET(PYGMO_PYTHON_VERSION ${PYTHON_VERSION_STRING} CACHE STRING "Build PyGMO with specific Python compatibility.")

 # Find Python libraries
-FIND_PACKAGE(PythonLibs REQUIRED)
+FIND_PACKAGE(PythonLibs "${PYGMO_PYTHON_VERSION}" REQUIRED)
 MESSAGE(STATUS "Python libraries: " "${PYTHON_LIBRARIES}")
 INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})
 MESSAGE(STATUS "Python library: " "${PYTHON_LIBRARY}")
diff --git a/cmake_modules/python_packages_dir.py b/cmake_modules/python_packages_dir.py
index 4f1dfce0..dc6302fc 100644
--- a/cmake_modules/python_packages_dir.py
+++ b/cmake_modules/python_packages_dir.py
@@ -1,4 +1,4 @@
 import distutils.sysconfig
 import os

-print os.path.split(distutils.sysconfig.get_python_lib())[-1]
+print(os.path.split(distutils.sysconfig.get_python_lib())[-1])
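With the change above, the PYGMO_PYTHON_VERSION cache variable selects the target Python at configure time, and the UNSET calls keep a later switch from reusing stale cache entries. A configure run might look like this; the version numbers are illustrative:

    mkdir build && cd build
    cmake -DPYGMO_PYTHON_VERSION=2.7 ..
    cmake -DPYGMO_PYTHON_VERSION=3.4 .   # re-configure against another interpreter
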
diff --git a/doc/doxygen/Doxyfile b/doc/doxygen/Doxyfile
index 60d7b913..1de33dd1 100644
--- a/doc/doxygen/Doxyfile
+++ b/doc/doxygen/Doxyfile
@@ -1,96 +1,121 @@
-# Doxyfile 1.6.1
+# Doxyfile 1.8.6

 # This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
+# doxygen (www.doxygen.org) for a project.
 #
-# All text after a hash (#) is considered a comment and will be ignored
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
 # The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").

 #---------------------------------------------------------------------------
 # Project related configuration options
 #---------------------------------------------------------------------------

 # This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.

 DOXYFILE_ENCODING      = UTF-8

-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
-# by quotes) that should identify the project.
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.

 PROJECT_NAME           = PaGMO

-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.

 PROJECT_NUMBER         = 1.1.5

-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          =
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.

 OUTPUT_DIRECTORY       = .

-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.

 CREATE_SUBDIRS         = NO

 # The OUTPUT_LANGUAGE tag is used to specify the language in which all
 # documentation generated by doxygen is written. Doxygen will use this
 # information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. OUTPUT_LANGUAGE = English -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. +# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. BRIEF_MEMBER_DESC = YES -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. +# The default value is: YES. REPEAT_BRIEF = YES -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief +# doxygen will generate a detailed section even if there is only a brief # description. +# The default value is: NO. ALWAYS_DETAILED_SEC = NO @@ -98,152 +123,204 @@ ALWAYS_DETAILED_SEC = NO # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. +# The default value is: NO. 
INLINE_INHERITED_MEMB = NO -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. +# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. FULL_PATH_NAMES = NO -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. STRIP_FROM_INC_PATH = -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. SHORT_NAMES = NO -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. 
JAVADOC_AUTOBRIEF = NO -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. QT_AUTOBRIEF = NO -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. INHERIT_DOCS = YES -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a +# new page for each member. If set to NO, the documentation of a member will be +# part of the file/class/namespace that contains it. +# The default value is: NO. SEPARATE_MEMBER_PAGES = NO -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 8 -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. 
An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. ALIASES = -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = YES -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO -# Doxygen selects the parser to use depending on the extension of the files it parses. -# With this tag you can assign which parser to use for a given extension. -# Doxygen has a built-in mapping, but you can override or extend it using this tag. -# The format is ext=language, where ext is a file extension, and language is one of -# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, -# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat -# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), -# use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. 
For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. +# +# Note For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. EXTENSION_MAPPING = +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also make the inheritance and collaboration +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. +# The default value is: NO. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. +# The default value is: NO. CPP_CLI_SUPPORT = NO -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. SIP_SUPPORT = NO -# For Microsoft's IDL there are propget and propput attributes to indicate getter -# and setter methods for a property. Setting this option to YES (the default) -# will make doxygen to replace the get and set methods by a property in the -# documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. 
If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. IDL_PROPERTY_SUPPORT = YES @@ -251,365 +328,464 @@ IDL_PROPERTY_SUPPORT = YES # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. +# The default value is: NO. DISTRIBUTE_GROUP_DOC = NO -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. SUBGROUPING = YES -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. TYPEDEF_HIDES_STRUCT = NO -# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to -# determine which symbols to keep in memory and which to flush to disk. -# When the cache is full, less often used symbols will be written to disk. -# For small to medium size projects (<1000 input files) the default value is -# probably good enough. For larger projects a too small cache size can cause -# doxygen to be busy swapping symbols to and from disk most of the time -# causing a significant performance penality. 
-# If the system has enough physical memory increasing the cache will improve the -# performance by keeping more symbols in memory. Note that the value works on -# a logarithmic scale so increasing the size by one will rougly double the -# memory usage. The cache size is given by this formula: -# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, -# corresponding to a cache size of 2^16 = 65536 symbols - -SYMBOL_CACHE_SIZE = 0 +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. EXTRACT_ALL = NO -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# be included in the documentation. +# The default value is: NO. EXTRACT_PRIVATE = NO -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# included in the documentation. +# The default value is: NO. EXTRACT_STATIC = NO -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. EXTRACT_LOCAL_CLASSES = YES -# This flag is only useful for Objective-C code. 
When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. +# This flag is only useful for Objective-C code. When set to YES local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO only methods in the interface are +# included. +# The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespace are hidden. +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. EXTRACT_ANON_NSPACES = NO -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. HIDE_UNDOC_MEMBERS = NO -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO these classes will be included in the various overviews. This option has +# no effect if EXTRACT_ALL is enabled. +# The default value is: NO. HIDE_UNDOC_CLASSES = YES -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO these declarations will be +# included in the documentation. +# The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. HIDE_IN_BODY_DOCS = NO -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. 
-# Set it to YES to include the internal documentation. +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. INTERNAL_DOCS = NO -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. +# The default value is: system dependent. CASE_SENSE_NAMES = YES -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES the +# scope will be hidden. +# The default value is: NO. HIDE_SCOPE_NAMES = NO -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. SHOW_INCLUDE_FILES = YES -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. INLINE_INFO = YES -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: YES. SORT_MEMBER_DOCS = YES -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. SORT_BRIEF_DOCS = NO -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. 
If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. SORT_GROUP_NAMES = NO -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. SORT_BY_SCOPE_NAME = NO -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the +# todo list. This list is created by putting \todo commands in the +# documentation. +# The default value is: YES. GENERATE_TODOLIST = YES -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. +# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the +# test list. This list is created by putting \test commands in the +# documentation. +# The default value is: YES. GENERATE_TESTLIST = YES -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. 
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. GENERATE_BUGLIST = YES -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. +# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. GENERATE_DEPRECATEDLIST= YES -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. ENABLED_SECTIONS = -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES the list +# will mention the files that were used to generate the documentation. +# The default value is: YES. SHOW_USED_FILES = YES -# If the sources in your project are distributed over multiple directories -# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy -# in the documentation. The default is NO. - -SHOW_DIRECTORIES = NO - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. SHOW_FILES = YES -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. -# This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. 
This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via -# popen()) the command , where is the value of -# the FILE_VERSION_FILTER tag, and is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. FILE_VERSION_FILTER = -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by -# doxygen. The layout file controls the global structure of the generated output files -# in an output format independent way. The create the layout file that represents -# doxygen's defaults, run doxygen with the -l option. You can optionally specify a -# file name after the option, if omitted DoxygenLayout.xml will be used as the name -# of the layout file. +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. LAYOUT_FILE = +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. Do not use file names with spaces, bibtex cannot handle them. See +# also \cite for info how to create references. + +CITE_BIB_FILES = + #--------------------------------------------------------------------------- -# configuration options related to warning and progress messages +# Configuration options related to warning and progress messages #--------------------------------------------------------------------------- -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. +# generated to standard error ( stderr) by doxygen. 
If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. WARNINGS = YES -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. +# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. WARN_IF_UNDOCUMENTED = YES -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. WARN_IF_DOC_ERROR = YES -# This WARN_NO_PARAMDOC option can be abled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO doxygen will only warn about wrong or incomplete parameter +# documentation, but not about the absence of documentation. +# The default value is: NO. WARN_NO_PARAMDOC = NO -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- -# configuration options related to the input files +# Configuration options related to the input files #--------------------------------------------------------------------------- -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. 
You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. INPUT = ../../src # This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx -# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. FILE_PATTERNS = -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. RECURSIVE = YES -# The EXCLUDE tag can be used to specify files and/or directories that should +# The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. EXCLUDE = ../../src/Eigen -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or -# directories that are symbolic links (a Unix filesystem feature) are excluded +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded # from the input. +# The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. 
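As an illustration, the pattern mentioned below can be combined with a second, hypothetical one:
#   EXCLUDE_PATTERNS = */test/* */third_party/*
# (*/third_party/* is a hypothetical pattern added only for the example)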
Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = */keplerian_toolbox/* @@ -618,593 +794,1037 @@ EXCLUDE_PATTERNS = */keplerian_toolbox/* # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. EXAMPLE_RECURSIVE = NO -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). IMAGE_PATH = ../files # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. -# If FILTER_PATTERNS is specified, this tag will be -# ignored. +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. 
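A typical sketch, assuming a hypothetical filter program py_filter is available on the search path:
#   FILTER_PATTERNS = *.py=py_filter
# (py_filter is a hypothetical executable used only for illustration)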
-# Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. -# The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER -# is applied to all files. +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). +# INPUT_FILTER ) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. FILTER_SOURCE_FILES = NO +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + #--------------------------------------------------------------------------- -# configuration options related to source browsing +# Configuration options related to source browsing #--------------------------------------------------------------------------- -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. SOURCE_BROWSER = YES -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. INLINE_SOURCES = NO -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. 
STRIP_CODE_COMMENTS = YES -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. REFERENCED_BY_RELATION = NO -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. REFERENCES_RELATION = NO -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. -# Otherwise they will link to the documentation. +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. REFERENCES_LINK_SOURCE = YES -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. +# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index +# Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated.
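To make the cross-reference tags described above concrete, a browsing setup that links callers and callees in both directions might look like this (example values only):
#   SOURCE_BROWSER         = YES
#   REFERENCED_BY_RELATION = YES
#   REFERENCES_RELATION    = YES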
Enable this if the project -# contains a lot of classes, structs, unions or interfaces. +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. ALPHABETICAL_INDEX = NO -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- -# configuration options related to the HTML output +# Configuration options related to the HTML output #--------------------------------------------------------------------------- -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. +# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output. +# The default value is: YES. GENERATE_HTML = YES -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a # standard header. +# +# To get valid HTML, the header file must include any scripts and style sheets +# that doxygen needs, which are dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW).
It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet. Note that doxygen will try to copy -# the style sheet file to the HTML output directory, so don't put your own -# stylesheet in the HTML output directory as well, or it will be erased! +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = YES +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- +# defined cascading style sheet that is included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet file to the output directory. For an example +# see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only.
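For example, a small override sheet plus an extra image (both file names hypothetical) would be declared as:
#   HTML_EXTRA_STYLESHEET = custom.css
#   HTML_EXTRA_FILES      = logo.png
# (custom.css and logo.png are hypothetical file names)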
Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the stylesheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the -# page has loaded. For this to work a browser that supports -# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox -# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. -# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). 
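As a worked example of the three HTML_COLORSTYLE_* tags above, a muted green scheme (values chosen within the documented ranges) would be:
#   HTML_COLORSTYLE_HUE   = 120
#   HTML_COLORSTYLE_SAT   = 80
#   HTML_COLORSTYLE_GAMMA = 100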
So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. 
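By way of illustration, a docset configuration for Xcode (the bundle identifier is hypothetical, following the reverse domain-name style described above) might be:
#   GENERATE_DOCSET  = YES
#   DOCSET_BUNDLE_ID = com.example.MyProject
# (com.example.MyProject is a hypothetical identifier)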
+# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be # written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). +# The GENERATE_CHI flag controls if a separate .chi index file is generated ( +# YES) or that it should be included in the master .chm file ( NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. +# The BINARY_TOC flag controls whether a binary table of contents is generated ( +# YES) or a normal table of contents ( NO) in the .chm file. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
TOC_EXPAND = NO -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER -# are set, an additional index file will be generated that can be used as input for -# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated -# HTML documentation. +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. -# For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see -# Qt Help Project / Custom Filters. +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's -# filter section matches. -# Qt Help Project / Filter Attributes. +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. 
Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). 
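As a sketch, enabling the side panel while dropping the redundant tab index (a pairing the DISABLE_INDEX description above also suggests) would be:
#   GENERATE_TREEVIEW = YES
#   DISABLE_INDEX     = YES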
Windows users are probably better off using the +# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO -# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, -# and Class Hierarchy pages using a tree view instead of an ordered list. +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. -USE_INLINE_TREES = NO +ENUM_VALUES_PER_LINE = 4 -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 -# When the SEARCHENGINE tag is enable doxygen will generate a search box for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) -# there is already a search function so this one should typically -# be disabled. +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES.
+ +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# installed or if you want the formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/