From 541d3493df7b603fe101b1720eeabb5ec836024d Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Thu, 22 Feb 2024 13:08:57 +0100 Subject: [PATCH 01/12] Define tree structure for time blocks and data Define tree structure --- src/andromede/libs/standard.py | 78 ++++++++- src/andromede/simulation/time_block.py | 25 ++- src/andromede/study/data.py | 54 +++++- tests/andromede/test_investment_pathway.py | 181 +++++++++++++++++++++ tests/functional/test_xpansion.py | 130 ++++++++------- 5 files changed, 399 insertions(+), 69 deletions(-) create mode 100644 tests/andromede/test_investment_pathway.py diff --git a/src/andromede/libs/standard.py b/src/andromede/libs/standard.py index 08135f56..c1dd91c8 100644 --- a/src/andromede/libs/standard.py +++ b/src/andromede/libs/standard.py @@ -16,6 +16,7 @@ from andromede.expression import literal, param, var from andromede.expression.expression import ExpressionRange, port_field from andromede.expression.indexing_structure import IndexingStructure +from andromede.model.common import ProblemContext from andromede.model.constraint import Constraint from andromede.model.model import ModelPort, PortFieldDefinition, PortFieldId, model from andromede.model.parameter import float_parameter, int_parameter @@ -41,7 +42,7 @@ ], ) -NODE_WITH_SPILL_AND_ENS_MODEL = model( +NODE_WITH_SPILL_AND_ENS = model( id="NODE_WITH_SPILL_AND_ENS_MODEL", parameters=[float_parameter("spillage_cost"), float_parameter("ens_cost")], variables=[ @@ -231,7 +232,7 @@ .shift(ExpressionRange(-param("d_min_down") + 1, literal(0))) .sum() <= param("nb_units_max").shift(-param("d_min_down")) - var("nb_on"), - ) + ), # It also works by writing ExpressionRange(-param("d_min_down") + 1, 0) as ExpressionRange's __post_init__ wraps integers to literal nodes. 
However, MyPy does not seem to infer that ExpressionRange's attributes are necessarily of ExpressionNode type and raises an error if the arguments in the constructor are integer (whereas it runs correctly), this why we specify it here with literal(0) instead of 0. ], objective_operational_contribution=(param("cost") * var("generation")) @@ -391,3 +392,76 @@ ], objective_operational_contribution=literal(0), # Implcitement nul ? ) + +""" Simple thermal unit that can be invested on""" +THERMAL_CANDIDATE = model( + id="GEN", + parameters=[ + float_parameter("op_cost", CONSTANT), + float_parameter("invest_cost", CONSTANT), + float_parameter("max_invest", CONSTANT), + ], + variables=[ + float_variable("generation", lower_bound=literal(0)), + float_variable( + "p_max", + lower_bound=literal(0), + upper_bound=param("max_invest"), + structure=CONSTANT, + context=ProblemContext.coupling, + ), + ], + ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("balance_port", "flow"), + definition=var("generation"), + ) + ], + constraints=[ + Constraint(name="Max generation", expression=var("generation") <= var("p_max")) + ], + objective_operational_contribution=(param("op_cost") * var("generation")) + .sum() + .expec(), + objective_investment_contribution=param("invest_cost") * var("p_max"), +) + +""" Simple thermal unit that can be invested on and with already installed capacity""" +THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA = model( + id="GEN", + parameters=[ + float_parameter("op_cost", CONSTANT), + float_parameter("invest_cost", CONSTANT), + float_parameter("max_invest", CONSTANT), + float_parameter("already_installed_capa", CONSTANT), + ], + variables=[ + float_variable("generation", lower_bound=literal(0)), + float_variable( + "invested_capa", + lower_bound=literal(0), + upper_bound=param("max_invest"), + structure=CONSTANT, + context=ProblemContext.coupling, + ), + ], + 
ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("balance_port", "flow"), + definition=var("generation"), + ) + ], + constraints=[ + Constraint( + name="Max generation", + expression=var("generation") + <= param("already_installed_capa") + var("invested_capa"), + ) + ], + objective_operational_contribution=(param("op_cost") * var("generation")) + .sum() + .expec(), + objective_investment_contribution=param("invest_cost") * var("invested_capa"), +) diff --git a/src/andromede/simulation/time_block.py b/src/andromede/simulation/time_block.py index 50e89b89..1b3edccc 100644 --- a/src/andromede/simulation/time_block.py +++ b/src/andromede/simulation/time_block.py @@ -10,8 +10,21 @@ # # This file is part of the Antares project. -from dataclasses import dataclass -from typing import List +from dataclasses import dataclass, field +from typing import List, Optional + + +# TODO: Move keys elsewhere as variables have no sense in this file +@dataclass(eq=True, frozen=True) +class TimestepComponentVariableKey: + """ + Identifies the solver variable for one timestep and one component variable. 
+ """ + + component_id: str + variable_name: str + block_timestep: Optional[int] = None + scenario: Optional[int] = None @dataclass(frozen=True) @@ -24,3 +37,11 @@ class TimeBlock: id: int timesteps: List[int] + + +@dataclass(frozen=True) +class ResolutionNode: + id: str + blocks: List[TimeBlock] + children: List["ResolutionNode"] = field(default_factory=list) + # solution: Dict[TimestepComponentVariableKey, lp.Variable] diff --git a/src/andromede/study/data.py b/src/andromede/study/data.py index 9cb792f8..3b294930 100644 --- a/src/andromede/study/data.py +++ b/src/andromede/study/data.py @@ -12,7 +12,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import Dict +from typing import Dict, Mapping, Optional from andromede.study.network import Network @@ -35,8 +35,18 @@ class ScenarioIndex: @dataclass(frozen=True) class AbstractDataStructure(ABC): - def get_value(self, timestep: int, scenario: int) -> float: - return NotImplemented + + @abstractmethod + def get_value( + self, timestep: int, scenario: int, node_id: Optional[int] = None + ) -> ( + float + ): # Is it necessary to add node_id as arguement here ? Yes if TreeData is to be considered as a child class + """ + Get the data value for a given timestep and scenario at a given node + Implement this method in subclasses as needed. 
+ """ + pass @abstractmethod def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -51,7 +61,9 @@ def check_requirement(self, time: bool, scenario: bool) -> bool: class ConstantData(AbstractDataStructure): value: float - def get_value(self, timestep: int, scenario: int) -> float: + def get_value( + self, timestep: int, scenario: int, node_id: Optional[int] = None + ) -> float: return self.value # ConstantData can be used for time varying or constant models @@ -71,7 +83,9 @@ class TimeSeriesData(AbstractDataStructure): time_series: Dict[TimeIndex, float] - def get_value(self, timestep: int, scenario: int) -> float: + def get_value( + self, timestep: int, scenario: int, node_id: Optional[int] = None + ) -> float: return self.time_series[TimeIndex(timestep)] def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -91,7 +105,9 @@ class ScenarioSeriesData(AbstractDataStructure): scenario_series: Dict[ScenarioIndex, float] - def get_value(self, timestep: int, scenario: int) -> float: + def get_value( + self, timestep: int, scenario: int, node_id: Optional[int] = None + ) -> float: return self.scenario_series[ScenarioIndex(scenario)] def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -111,7 +127,9 @@ class TimeScenarioSeriesData(AbstractDataStructure): time_scenario_series: Dict[TimeScenarioIndex, float] - def get_value(self, timestep: int, scenario: int) -> float: + def get_value( + self, timestep: int, scenario: int, node_id: Optional[int] = None + ) -> float: return self.time_scenario_series[TimeScenarioIndex(timestep, scenario)] def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -121,6 +139,28 @@ def check_requirement(self, time: bool, scenario: bool) -> bool: return time and scenario +@dataclass(frozen=True) +class TreeData(AbstractDataStructure): + data: Mapping[int, AbstractDataStructure] + + def get_value( + self, timestep: int, scenario: int, node_id: Optional[int] = None + ) -> float: + if ( + not 
node_id + ): # TODO : Could we remove the default None argument for node_id ? + raise ValueError( + "A node_id must be specified to retrieve a value in TreeData." + ) + return self.data[node_id].get_value(timestep, scenario) + + def check_requirement(self, time: bool, scenario: bool) -> bool: + return all( + node_data.check_requirement(time, scenario) + for node_data in self.data.values() + ) + + @dataclass(frozen=True) class ComponentParameterIndex: component_id: str diff --git a/tests/andromede/test_investment_pathway.py b/tests/andromede/test_investment_pathway.py new file mode 100644 index 00000000..bf359601 --- /dev/null +++ b/tests/andromede/test_investment_pathway.py @@ -0,0 +1,181 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+ +import pytest + +from andromede.libs.standard import ( + DEMAND_MODEL, + GENERATOR_MODEL, + NODE_WITH_SPILL_AND_ENS, + THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, +) +from andromede.simulation.optimization import build_problem +from andromede.simulation.optimization_orchestrator import OptimizationOrchestrator +from andromede.simulation.output_values import OutputValues +from andromede.simulation.time_block import ResolutionNode, TimeBlock +from andromede.study.data import ConstantData, DataBase, TreeData +from andromede.study.network import Component, Network, Node, PortRef, create_component + + +@pytest.fixture +def generator() -> Component: + generator = create_component( + model=GENERATOR_MODEL, + id="BASE", + ) + return generator + + +@pytest.fixture +def candidate() -> Component: + candidate = create_component( + model=THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, id="CAND" + ) + return candidate + + +@pytest.fixture +def demand() -> Component: + demand = create_component(model=DEMAND_MODEL, id="D") + return demand + + +@pytest.fixture +def node() -> Node: + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") + return node + + +def test_investment_pathway_on_a_tree_with_one_root_two_children( + generator: Component, + candidate: Component, + demand: Component, + node: Node, +) -> None: + """ + Simple generation expansion problem on one node with one thermal candidate. + + The investment pathway needs to be determined on a tree with one root and two children. This means that we need to take a common initial investment decision and two different descisions one the second investment horizon. Within each decision tree node, one timestep and one scenario are considered. 
+ + Root node : + Demand = 300 + Generator : + P_max = 200, + Production cost = 10, + Max investment = 400, + Investment cost = 100 + Unsupplied energy : + Cost = 10000 + + Child 1 : + Demand = 600 + Generator : + P_max = 200, + Production cost = 10, + Max investment = 100, + Investment cost = 50 + Base : + P_max = 200, + Production cost = 5 + Unsupplied energy : + Cost = 10000 + + Child 2 : + Demand = 600 + Generator : + P_max = 200, + Production cost = 10, + Max investment = 100, + Investment cost = 50 + Unsupplied energy : + Cost = 10000 + + Inthe second decision time, demand increases from 300 to 600 in both scenarios. However, investment capacity in the candidate is limited to 100 in the second stage. Investment cost decreases to reflect the effect of a discount rate. + + In case 1, a base unit of capacity 100 has arrived and can produce at the same cost than the candidate. As it is more intersting to invest the latest possible, the optimal solution for scenario 1 is to invest [100, 100]. + + In case 2, there is no base unit and the max investment is 100 in the second stage, therefore if we consider scenario 2 only, as unsupplied energy is very expensive, the best investment is [300, 100] + + But here as we solve on the tree, we need to find the best solution in expectation on the set of paths in the tree. 
+ + With initial investment = 100 : + Total cost = [100 x 100 (investment root) + 10 x 300 (prod root)] + + 0.5 (proba child 1) x [100 x 50 (investment child 1) + 10 x 400 (prod generator) + 5 x 200 (prod base)] + + 0.5 (proba child 2) x [100 x 50 (investment child 2) + 10 x 400 (prod generator) + 1000 x 200 (unsupplied energy)] + = 122 500 + + With initial investment = 300 : + Total cost = [100 x 300 (investment root) + 10 x 300 (prod root)] + + 0.5 (proba child 1) x [10 x 400 (prod generator) + 5 x 200 (prod base)] + + 0.5 (proba child 2) x [100 x 50 (investment child 2) + 10 x 600 (prod generator)] + = 41 000 + + As investing less than 300 in the first stage would increase the unsupplied energy and lead to an increase in overall cost (-1 MW invested in 1st stage => + 1 MW unsp energy => +900/MW cost increase more or less), the optimal solution is to invest : + - 300 at first stage + - 0 in child 1 + - 100 in child 2 + + """ + + # Either we duplicate all network for each node : Lots of duplications + # or we index all data, parameters, variables by the resolution node : Make the data struture dependent of the resolution tree... 
+ + database = DataBase() + database.add_data("N", "spillage", ConstantData(10)) + database.add_data("N", "unsupplied_energy", ConstantData(10000)) + + database.add_data( + "D", + "demand", + TreeData({0: ConstantData(300), 1: ConstantData(600), 2: ConstantData(600)}), + ) + + database.add_data("CAND", "op_cost", ConstantData(10)) + database.add_data("CAND", "already_installed_capa", ConstantData(200)) + database.add_data( + "CAND", + "invest_cost", + TreeData({0: ConstantData(100), 1: ConstantData(50), 2: ConstantData(50)}), + ) + database.add_data( + "CAND", + "max_invest", + TreeData({0: ConstantData(400), 1: ConstantData(100), 2: ConstantData(100)}), + ) + + database.add_data( + "BASE", + "p_max", + TreeData({0: ConstantData(0), 1: ConstantData(200), 2: ConstantData(0)}), + ) + database.add_data("BASE", "cost", ConstantData(5)) + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(generator) + network.add_component(candidate) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) + + root = ResolutionNode("2030", [TimeBlock(0, [0])]) + child_1 = ResolutionNode("2040_with_base", [TimeBlock(0, [0])]) + child_2 = ResolutionNode("2040_no_base", [TimeBlock(0, [0])]) + resolution_tree = ResolutionNode(root, [child_1, child_2]) + + scenarios = 1 + + orchestrator = OptimizationOrchestrator(network, database, resolution_tree) + solution_tree = orchestrator.run() + + # Réfléchir à la représentation des variables dans l'arbre diff --git a/tests/functional/test_xpansion.py b/tests/functional/test_xpansion.py index e7e5847b..6df6ca9a 100644 --- a/tests/functional/test_xpansion.py +++ b/tests/functional/test_xpansion.py @@ -12,7 +12,7 @@ import pytest -from andromede.expression.expression import literal, param, port_field, 
var +from andromede.expression.expression import literal, param, var from andromede.expression.indexing_structure import IndexingStructure from andromede.libs.standard import ( BALANCE_PORT_TYPE, @@ -20,7 +20,8 @@ DEMAND_MODEL, GENERATOR_MODEL, NODE_BALANCE_MODEL, - NODE_WITH_SPILL_AND_ENS_MODEL, + NODE_WITH_SPILL_AND_ENS, + THERMAL_CANDIDATE, ) from andromede.model import ( Constraint, @@ -37,7 +38,6 @@ MergedProblemStrategy, OutputValues, TimeBlock, - build_benders_decomposed_problem, build_problem, ) from andromede.study import ( @@ -61,47 +61,9 @@ @pytest.fixture -def thermal_candidate() -> Model: - THERMAL_CANDIDATE = model( - id="GEN", - parameters=[ - float_parameter("op_cost", CONSTANT), - float_parameter("invest_cost", CONSTANT), - ], - variables=[ - float_variable("generation", lower_bound=literal(0)), - float_variable( - "p_max", - lower_bound=literal(0), - upper_bound=literal(1000), - structure=CONSTANT, - context=COUPLING, - ), - ], - ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], - port_fields_definitions=[ - PortFieldDefinition( - port_field=PortFieldId("balance_port", "flow"), - definition=var("generation"), - ) - ], - constraints=[ - Constraint( - name="Max generation", expression=var("generation") <= var("p_max") - ) - ], - objective_operational_contribution=(param("op_cost") * var("generation")) - .sum() - .expec(), - objective_investment_contribution=param("invest_cost") * var("p_max"), - ) - return THERMAL_CANDIDATE - - -@pytest.fixture -def discrete_candidate() -> Model: - DISCRETE_CANDIDATE = model( - id="DISCRETE", +def wind_cluster_candidate() -> Model: + WIND_CLUSTER_CANDIDATE = model( + id="WIND_CLUSTER", parameters=[ float_parameter("op_cost", CONSTANT), float_parameter("invest_cost", CONSTANT), @@ -158,8 +120,8 @@ def generator() -> Component: @pytest.fixture -def candidate(thermal_candidate: Model) -> Component: - candidate = create_component(model=thermal_candidate, id="CAND") +def candidate() -> Component: 
+ candidate = create_component(model=THERMAL_CANDIDATE, id="CAND") return candidate @@ -169,9 +131,16 @@ def cluster_candidate(discrete_candidate: Model) -> Component: return cluster +@pytest.fixture +def demand() -> Component: + demand = create_component(model=DEMAND_MODEL, id="D") + return demand + + def test_generation_xpansion_single_time_step_single_scenario( generator: Component, candidate: Component, + demand: Component, ) -> None: """ Simple generation expansion problem on one node. One timestep, one scenario, one thermal cluster candidate. @@ -199,11 +168,7 @@ def test_generation_xpansion_single_time_step_single_scenario( database.add_data("CAND", "op_cost", ConstantData(10)) database.add_data("CAND", "invest_cost", ConstantData(490)) - - demand = create_component( - model=DEMAND_MODEL, - id="D", - ) + database.add_data("CAND", "max_invest", ConstantData(1000)) node = Node(model=NODE_BALANCE_MODEL, id="N") network = Network("test") @@ -243,6 +208,7 @@ def test_two_candidates_xpansion_single_time_step_single_scenario( generator: Component, candidate: Component, cluster_candidate: Component, + demand: Component, ) -> None: """ As before, simple generation expansion problem on one node, one timestep and one scenario @@ -273,12 +239,11 @@ def test_two_candidates_xpansion_single_time_step_single_scenario( database.add_data("CAND", "op_cost", ConstantData(10)) database.add_data("CAND", "invest_cost", ConstantData(490)) + database.add_data("CAND", "max_invest", ConstantData(1000)) - database.add_data("DISCRETE", "op_cost", ConstantData(10)) - database.add_data("DISCRETE", "invest_cost", ConstantData(200)) - database.add_data("DISCRETE", "p_max_per_unit", ConstantData(10)) - - demand = create_component(model=DEMAND_MODEL, id="D") + database.add_data("CLUSTER", "op_cost", ConstantData(10)) + database.add_data("CLUSTER", "invest_cost", ConstantData(200)) + database.add_data("CLUSTER", "p_max_per_unit", ConstantData(10)) node = Node(model=NODE_BALANCE_MODEL, id="N") 
network = Network("test") @@ -316,9 +281,59 @@ def test_two_candidates_xpansion_single_time_step_single_scenario( assert output == expected_output, f"Output differs from expected: {output}" +def test_model_export_xpansion_single_time_step_single_scenario( + generator: Component, + candidate: Component, + cluster_candidate: Component, + demand: Component, +) -> None: + """ + Same test as before but this time we separate master/subproblem and + export the problems in MPS format to be solved by the Benders solver in Xpansion + """ + + database = DataBase() + database.add_data("D", "demand", ConstantData(400)) + + database.add_data("N", "spillage_cost", ConstantData(1)) + database.add_data("N", "ens_cost", ConstantData(501)) + + database.add_data("G1", "p_max", ConstantData(200)) + database.add_data("G1", "cost", ConstantData(45)) + + database.add_data("CAND", "op_cost", ConstantData(10)) + database.add_data("CAND", "invest_cost", ConstantData(490)) + database.add_data("CAND", "max_invest", ConstantData(1000)) + + database.add_data("CLUSTER", "op_cost", ConstantData(10)) + database.add_data("CLUSTER", "invest_cost", ConstantData(200)) + database.add_data("CLUSTER", "p_max_per_unit", ConstantData(10)) + + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(generator) + network.add_component(candidate) + network.add_component(cluster_candidate) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) + network.connect( + PortRef(cluster_candidate, "balance_port"), PortRef(node, "balance_port") + ) + scenarios = 1 + + xpansion = build_benders_decomposed_problem( + network, database, TimeBlock(1, [0]), scenarios + ) + assert xpansion.run() + + def 
test_generation_xpansion_two_time_steps_two_scenarios( generator: Component, candidate: Component, + demand: Component, ) -> None: """ Same as previous example but with two timesteps and two scenarios, in order to test the correct instantiation of the objective function @@ -363,8 +378,7 @@ def test_generation_xpansion_two_time_steps_two_scenarios( database.add_data("CAND", "op_cost", ConstantData(10)) database.add_data("CAND", "invest_cost", ConstantData(490)) - - demand = create_component(model=DEMAND_MODEL, id="D") + database.add_data("CAND", "max_invest", ConstantData(1000)) node = Node(model=NODE_BALANCE_MODEL, id="N") network = Network("test") From 7328c97277b3c78e37ae33ec0d50bf2571060108 Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Thu, 22 Feb 2024 17:56:54 +0100 Subject: [PATCH 02/12] Improve test description --- src/andromede/simulation/time_block.py | 5 +++- tests/andromede/test_investment_pathway.py | 31 +++++++++++++++++----- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/src/andromede/simulation/time_block.py b/src/andromede/simulation/time_block.py index 1b3edccc..9af9dc09 100644 --- a/src/andromede/simulation/time_block.py +++ b/src/andromede/simulation/time_block.py @@ -42,6 +42,9 @@ class TimeBlock: @dataclass(frozen=True) class ResolutionNode: id: str - blocks: List[TimeBlock] + blocks: List[TimeBlock] # Séparer horizon de simu annuel children: List["ResolutionNode"] = field(default_factory=list) # solution: Dict[TimestepComponentVariableKey, lp.Variable] + +class InBetweenMasterDecisionTimeHorizon: + blocks : List[TimeBlock] diff --git a/tests/andromede/test_investment_pathway.py b/tests/andromede/test_investment_pathway.py index bf359601..53926bc3 100644 --- a/tests/andromede/test_investment_pathway.py +++ b/tests/andromede/test_investment_pathway.py @@ -62,11 +62,23 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( node: Node, ) -> None: """ - Simple generation expansion problem on one node with one 
thermal candidate. + This use case aims at representing the situation where investment decisions are to be made at different, say "planning times". An actualisation rate can be taken into account. - The investment pathway needs to be determined on a tree with one root and two children. This means that we need to take a common initial investment decision and two different descisions one the second investment horizon. Within each decision tree node, one timestep and one scenario are considered. + The novelty compared the actual usage of planning tools, is that the planning decisions at a given time are taken without knowing exactly which "macro-scenario" / hypothesis on the system that will eventually happen (only knowing the probability distribution of these hypothesis). - Root node : + This example models a case where investment decisions have to be made in 2030 and 2040. + - In 2030, we have full knowledge of the existing assets + - In 2040, two equiprobable hypothesis are possible : + - A case where there is no change in the generation assets since 2030 (except te potential investment in 2030) + - A case where a base generation unit is present + + When taking the decision in 2030, we do not know which case will occur in 2040 and we seek the best decision given a risk criterion (the expectation here). + + The value of these models lies in the output for the first decision rather than the decisions at the later stages as the first decisions are related to "what we have to do today" ? 
+ + More specifically, to define the use case, we define the following tree representing the system at the different decision times and hypothesis + + 2030 (root node) : Demand = 300 Generator : P_max = 200, @@ -76,7 +88,7 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( Unsupplied energy : Cost = 10000 - Child 1 : + 2040 with new base (scenario 1) : Demand = 600 Generator : P_max = 200, @@ -89,7 +101,7 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( Unsupplied energy : Cost = 10000 - Child 2 : + 2040 no base (scenario 2) : Demand = 600 Generator : P_max = 200, @@ -99,7 +111,7 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( Unsupplied energy : Cost = 10000 - Inthe second decision time, demand increases from 300 to 600 in both scenarios. However, investment capacity in the candidate is limited to 100 in the second stage. Investment cost decreases to reflect the effect of a discount rate. + In the second decision time, demand increases from 300 to 600 in both scenarios. However, investment capacity in the candidate is limited to 100 in the second stage. Investment cost decreases to reflect the effect of a discount rate. In case 1, a base unit of capacity 100 has arrived and can produce at the same cost than the candidate. As it is more intersting to invest the latest possible, the optimal solution for scenario 1 is to invest [100, 100]. @@ -159,6 +171,11 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( ) database.add_data("BASE", "cost", ConstantData(5)) + # Fonction qui crée les composants / noeud en fonction de l'arbre et du Database initial / modèles + générer les contraintes couplantes temporelles trajectoire + actualisation + + # contraintes industrielles liées à l'arbre ? 
+ # Test mode peigne + # Générer le modèle "couplant" + network = Network("test") network.add_node(node) network.add_component(demand) @@ -169,7 +186,7 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) root = ResolutionNode("2030", [TimeBlock(0, [0])]) - child_1 = ResolutionNode("2040_with_base", [TimeBlock(0, [0])]) + child_1 = ResolutionNode("2040_new_base", [TimeBlock(0, [0])]) child_2 = ResolutionNode("2040_no_base", [TimeBlock(0, [0])]) resolution_tree = ResolutionNode(root, [child_1, child_2]) From 743ce1af0acfeb5b47f59d19b0aab3516d9c65c7 Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Fri, 23 Feb 2024 18:05:19 +0100 Subject: [PATCH 03/12] Create class to store configuration of a resolution node --- requirements.txt | 1 + src/andromede/simulation/time_block.py | 20 ++++++++------- tests/andromede/test_investment_pathway.py | 30 +++++++++++++++------- 3 files changed, 33 insertions(+), 18 deletions(-) diff --git a/requirements.txt b/requirements.txt index e552b8ed..badd6e69 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,5 @@ numpy==1.24.4 ortools==9.6.2534 protobuf==4.23.3 scipy==1.10.1 +anytree==2.12.1 diff --git a/src/andromede/simulation/time_block.py b/src/andromede/simulation/time_block.py index 9af9dc09..95075747 100644 --- a/src/andromede/simulation/time_block.py +++ b/src/andromede/simulation/time_block.py @@ -11,7 +11,9 @@ # This file is part of the Antares project. 
from dataclasses import dataclass, field -from typing import List, Optional +from typing import Dict, List, Optional + +from anytree import Node as TreeNode # TODO: Move keys elsewhere as variables have no sense in this file @@ -40,11 +42,11 @@ class TimeBlock: @dataclass(frozen=True) -class ResolutionNode: - id: str - blocks: List[TimeBlock] # Séparer horizon de simu annuel - children: List["ResolutionNode"] = field(default_factory=list) - # solution: Dict[TimestepComponentVariableKey, lp.Variable] - -class InBetweenMasterDecisionTimeHorizon: - blocks : List[TimeBlock] +class InterDecisionTimeScenarioConfig: + blocks: List[TimeBlock] + scenarios: int + + +@dataclass(frozen=True) +class ConfiguredTree: + node_to_config: Dict[TreeNode, InterDecisionTimeScenarioConfig] diff --git a/tests/andromede/test_investment_pathway.py b/tests/andromede/test_investment_pathway.py index 53926bc3..b178e709 100644 --- a/tests/andromede/test_investment_pathway.py +++ b/tests/andromede/test_investment_pathway.py @@ -11,6 +11,7 @@ # This file is part of the Antares project. 
import pytest +from anytree import Node as TreeNode from andromede.libs.standard import ( DEMAND_MODEL, @@ -18,10 +19,12 @@ NODE_WITH_SPILL_AND_ENS, THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, ) -from andromede.simulation.optimization import build_problem from andromede.simulation.optimization_orchestrator import OptimizationOrchestrator -from andromede.simulation.output_values import OutputValues -from andromede.simulation.time_block import ResolutionNode, TimeBlock +from andromede.simulation.time_block import ( + ConfiguredTree, + InterDecisionTimeScenarioConfig, + TimeBlock, +) from andromede.study.data import ConstantData, DataBase, TreeData from andromede.study.network import Component, Network, Node, PortRef, create_component @@ -185,14 +188,23 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) - root = ResolutionNode("2030", [TimeBlock(0, [0])]) - child_1 = ResolutionNode("2040_new_base", [TimeBlock(0, [0])]) - child_2 = ResolutionNode("2040_no_base", [TimeBlock(0, [0])]) - resolution_tree = ResolutionNode(root, [child_1, child_2]) - scenarios = 1 + time_scenario_config = InterDecisionTimeScenarioConfig( + [TimeBlock(0, [0])], scenarios + ) + + root = TreeNode("2030") + new_base = TreeNode("2040_new_base", parent=root) + no_base = TreeNode("2040_no_base", parent=root) + configured_tree = ConfiguredTree( + { + root: time_scenario_config, + new_base: time_scenario_config, + no_base: time_scenario_config, + } + ) - orchestrator = OptimizationOrchestrator(network, database, resolution_tree) + orchestrator = OptimizationOrchestrator(network, database, configured_tree) solution_tree = orchestrator.run() # Réfléchir à la représentation des variables dans l'arbre From 75b3f37d8f6ccc563c2996f01c32f918dfcf95b4 Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Mon, 26 Feb 2024 
15:57:29 +0100 Subject: [PATCH 04/12] Work in progress --- src/andromede/libs/standard.py | 4 +-- src/andromede/model/model.py | 2 ++ .../simulation/benders_decomposed.py | 25 +++++++++++-------- src/andromede/simulation/optimization.py | 1 + src/andromede/simulation/time_block.py | 2 ++ tests/andromede/test_investment_pathway.py | 14 ++++++----- tests/functional/test_xpansion.py | 1 + 7 files changed, 30 insertions(+), 19 deletions(-) diff --git a/src/andromede/libs/standard.py b/src/andromede/libs/standard.py index c1dd91c8..f9028bf8 100644 --- a/src/andromede/libs/standard.py +++ b/src/andromede/libs/standard.py @@ -408,7 +408,7 @@ lower_bound=literal(0), upper_bound=param("max_invest"), structure=CONSTANT, - context=ProblemContext.coupling, + context=ProblemContext.COUPLING, ), ], ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], @@ -443,7 +443,7 @@ lower_bound=literal(0), upper_bound=param("max_invest"), structure=CONSTANT, - context=ProblemContext.coupling, + context=ProblemContext.COUPLING, ), ], ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], diff --git a/src/andromede/model/model.py b/src/andromede/model/model.py index 3997f43d..571b69f7 100644 --- a/src/andromede/model/model.py +++ b/src/andromede/model/model.py @@ -19,6 +19,8 @@ from dataclasses import dataclass, field from typing import Dict, Iterable, Optional +from anytree import Node as TreeNode, LevelOrderIter + from andromede.expression import ( AdditionNode, ComparisonNode, diff --git a/src/andromede/simulation/benders_decomposed.py b/src/andromede/simulation/benders_decomposed.py index 08d718f1..0b8f1adb 100644 --- a/src/andromede/simulation/benders_decomposed.py +++ b/src/andromede/simulation/benders_decomposed.py @@ -33,7 +33,7 @@ InvestmentProblemStrategy, OperationalProblemStrategy, ) -from andromede.simulation.time_block import TimeBlock +from andromede.simulation.time_block import ConfiguredTree, TimeBlock from andromede.study.data import 
DataBase from andromede.study.network import Network from andromede.utils import read_json, serialize, serialize_json @@ -206,8 +206,7 @@ def run( def build_benders_decomposed_problem( network: Network, database: DataBase, - blocks: List[TimeBlock], - scenarios: int, + configured_tree: ConfiguredTree, *, border_management: BlockBorderManagement = BlockBorderManagement.CYCLE, solver_id: str = "GLOP", @@ -222,6 +221,7 @@ def build_benders_decomposed_problem( master = build_problem( network, database, + configured_tree.root, # Could be any node, given the implmentation of get_nodes() null_time_block := TimeBlock( # Not necessary for master, but list must be non-empty 0, [0] ), @@ -234,18 +234,21 @@ def build_benders_decomposed_problem( # Benders Decomposed Sub-problems subproblems = [] - for block in blocks: - subproblems.append( - build_problem( + for ( + tree_node, + time_scenario_config, + ) in configured_tree.node_to_config.items(): + for block in time_scenario_config.blocks: + # Xpansion Sub-problems + subproblems.append(build_problem( network, database, + tree_node, block, - scenarios, - problem_name=f"subproblem_{block.id}", - border_management=border_management, + time_scenario_config.scenarios, + problem_name=f"subproblem_{tree_node.name}_{block.id}", solver_id=solver_id, problem_strategy=OperationalProblemStrategy(), - ) - ) + )) return BendersDecomposedProblem(master, subproblems) diff --git a/src/andromede/simulation/optimization.py b/src/andromede/simulation/optimization.py index 35c50e4a..92216a11 100644 --- a/src/andromede/simulation/optimization.py +++ b/src/andromede/simulation/optimization.py @@ -809,6 +809,7 @@ def export_as_lp(self) -> str: def build_problem( network: Network, database: DataBase, + tree_node_name: str, block: TimeBlock, scenarios: int, *, diff --git a/src/andromede/simulation/time_block.py b/src/andromede/simulation/time_block.py index 95075747..3e71116e 100644 --- a/src/andromede/simulation/time_block.py +++ 
b/src/andromede/simulation/time_block.py @@ -23,6 +23,7 @@ class TimestepComponentVariableKey: Identifies the solver variable for one timestep and one component variable. """ + tree_node_name: str component_id: str variable_name: str block_timestep: Optional[int] = None @@ -49,4 +50,5 @@ class InterDecisionTimeScenarioConfig: @dataclass(frozen=True) class ConfiguredTree: + root: TreeNode # Could be retrieved easily from any node with node.root, but clearer to identify it separately node_to_config: Dict[TreeNode, InterDecisionTimeScenarioConfig] diff --git a/tests/andromede/test_investment_pathway.py b/tests/andromede/test_investment_pathway.py index b178e709..3e740c37 100644 --- a/tests/andromede/test_investment_pathway.py +++ b/tests/andromede/test_investment_pathway.py @@ -19,7 +19,7 @@ NODE_WITH_SPILL_AND_ENS, THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, ) -from andromede.simulation.optimization_orchestrator import OptimizationOrchestrator +from andromede.simulation.benders_decomposed import build_benders_decomposed_problem from andromede.simulation.time_block import ( ConfiguredTree, InterDecisionTimeScenarioConfig, @@ -77,7 +77,7 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( When taking the decision in 2030, we do not know which case will occur in 2040 and we seek the best decision given a risk criterion (the expectation here). - The value of these models lies in the output for the first decision rather than the decisions at the later stages as the first decisions are related to "what we have to do today" ? + The value of these models lies in the output for the first decision rather than the decisions at the later stages as the first decisions are related to "what we have to do today" ? 
More specifically, to define the use case, we define the following tree representing the system at the different decision times and hypothesis @@ -174,7 +174,7 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( ) database.add_data("BASE", "cost", ConstantData(5)) - # Fonction qui crée les composants / noeud en fonction de l'arbre et du Database initial / modèles + générer les contraintes couplantes temporelles trajectoire + actualisation + + # Fonction qui crée les composants / noeud en fonction de l'arbre et du Database initial / modèles + générer les contraintes couplantes temporelles trajectoire + actualisation + # contraintes industrielles liées à l'arbre ? # Test mode peigne # Générer le modèle "couplant" @@ -197,14 +197,16 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( new_base = TreeNode("2040_new_base", parent=root) no_base = TreeNode("2040_no_base", parent=root) configured_tree = ConfiguredTree( + root, { root: time_scenario_config, new_base: time_scenario_config, no_base: time_scenario_config, - } + }, ) - orchestrator = OptimizationOrchestrator(network, database, configured_tree) - solution_tree = orchestrator.run() + problems = build_benders_decomposed_problem( + network, database, configured_tree + ) # Réfléchir à la représentation des variables dans l'arbre diff --git a/tests/functional/test_xpansion.py b/tests/functional/test_xpansion.py index 6df6ca9a..6ace4433 100644 --- a/tests/functional/test_xpansion.py +++ b/tests/functional/test_xpansion.py @@ -40,6 +40,7 @@ TimeBlock, build_problem, ) +from andromede.simulation.benders_decomposed import build_benders_decomposed_problem from andromede.study import ( Component, ConstantData, From 76d058dabf154be0b00c3df2bdde3f4cf025968b Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Wed, 28 Feb 2024 16:02:51 +0100 Subject: [PATCH 05/12] Design the construction of network on decision tree --- mypy.ini | 3 + src/andromede/model/model.py | 3 +- 
.../simulation/benders_decomposed.py | 32 ++-- src/andromede/simulation/decision_tree.py | 169 ++++++++++++++++++ src/andromede/simulation/optimization.py | 3 +- src/andromede/simulation/time_block.py | 18 +- src/andromede/study/data.py | 7 +- tests/andromede/test_investment_pathway.py | 16 +- tests/functional/test_xpansion.py | 29 +-- 9 files changed, 231 insertions(+), 49 deletions(-) create mode 100644 src/andromede/simulation/decision_tree.py diff --git a/mypy.ini b/mypy.ini index 8670eab7..9027a095 100644 --- a/mypy.ini +++ b/mypy.ini @@ -5,4 +5,7 @@ disallow_untyped_defs = true disallow_untyped_calls = true [mypy-ortools.*] +ignore_missing_imports = true + +[mypy-anytree.*] ignore_missing_imports = true \ No newline at end of file diff --git a/src/andromede/model/model.py b/src/andromede/model/model.py index 571b69f7..4a38e415 100644 --- a/src/andromede/model/model.py +++ b/src/andromede/model/model.py @@ -19,7 +19,8 @@ from dataclasses import dataclass, field from typing import Dict, Iterable, Optional -from anytree import Node as TreeNode, LevelOrderIter +from anytree import LevelOrderIter +from anytree import Node as TreeNode from andromede.expression import ( AdditionNode, diff --git a/src/andromede/simulation/benders_decomposed.py b/src/andromede/simulation/benders_decomposed.py index 0b8f1adb..eb52cf26 100644 --- a/src/andromede/simulation/benders_decomposed.py +++ b/src/andromede/simulation/benders_decomposed.py @@ -18,6 +18,10 @@ import pathlib from typing import Any, Dict, List, Optional +from anytree import Node as TreeNode + +from andromede.model.model import Model +from andromede.simulation.decision_tree import ConfiguredTree, create_master_network from andromede.simulation.optimization import ( BlockBorderManagement, OptimizationProblem, @@ -204,10 +208,11 @@ def run( def build_benders_decomposed_problem( - network: Network, + network_on_tree: Dict[TreeNode, Network], database: DataBase, configured_tree: ConfiguredTree, *, + 
decision_coupling_model: Optional[Model] = None, border_management: BlockBorderManagement = BlockBorderManagement.CYCLE, solver_id: str = "GLOP", ) -> BendersDecomposedProblem: @@ -217,9 +222,11 @@ def build_benders_decomposed_problem( Returns a Benders Decomposed problem """ + master_network = create_master_network(network_on_tree, decision_coupling_model) + # Benders Decomposed Master Problem master = build_problem( - network, + master_network, database, configured_tree.root, # Could be any node, given the implmentation of get_nodes() null_time_block := TimeBlock( # Not necessary for master, but list must be non-empty @@ -240,15 +247,16 @@ def build_benders_decomposed_problem( ) in configured_tree.node_to_config.items(): for block in time_scenario_config.blocks: # Xpansion Sub-problems - subproblems.append(build_problem( - network, - database, - tree_node, - block, - time_scenario_config.scenarios, - problem_name=f"subproblem_{tree_node.name}_{block.id}", - solver_id=solver_id, - problem_strategy=OperationalProblemStrategy(), - )) + subproblems.append( + build_problem( + network_on_tree[tree_node], + database, + block, + time_scenario_config.scenarios, + problem_name=f"subproblem_{tree_node.name}_{block.id}", + solver_id=solver_id, + problem_strategy=OperationalProblemStrategy(), + ) + ) return BendersDecomposedProblem(master, subproblems) diff --git a/src/andromede/simulation/decision_tree.py b/src/andromede/simulation/decision_tree.py new file mode 100644 index 00000000..9a42af66 --- /dev/null +++ b/src/andromede/simulation/decision_tree.py @@ -0,0 +1,169 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+ +import dataclasses +from dataclasses import dataclass, field +from typing import Dict, Iterable, List, Optional + +from anytree import LevelOrderIter +from anytree import Node as TreeNode + +from andromede.expression.expression import ExpressionNode +from andromede.model.constraint import Constraint +from andromede.model.model import Model, PortFieldDefinition, PortFieldId, model +from andromede.model.variable import Variable +from andromede.simulation.time_block import TimeBlock +from andromede.study.network import Component, Network, Node, create_component + + +@dataclass(frozen=True) +class InterDecisionTimeScenarioConfig: + blocks: List[TimeBlock] + scenarios: int + + +@dataclass(frozen=True) +class ConfiguredTree: + node_to_config: Dict[TreeNode, InterDecisionTimeScenarioConfig] + root: TreeNode = field(init=False) + + def __post_init__(self) -> None: + # Stores the root, by getting it from any tree node + object.__setattr__(self, "root", next(iter(self.node_to_config.keys())).root) + + +def create_single_node_decision_tree( + blocks: List[TimeBlock], scenarios: int +) -> ConfiguredTree: + time_scenario_config = InterDecisionTimeScenarioConfig(blocks, scenarios) + + root = TreeNode("root") + configured_tree = ConfiguredTree( + { + root: time_scenario_config, + }, + ) + + return configured_tree + + +def _generate_tree_variables( + variables: Dict[str, Variable], tree_node: TreeNode +) -> Iterable[Variable]: + tree_variables = [] + for variable in variables.values(): + tree_variables.append( + dataclasses.replace(variable, name=f"{tree_node.name}_{variable.name}") + ) + return tree_variables + + +def _generate_tree_constraints( + constraints: Dict[str, Constraint], tree: TreeNode +) -> Iterable[Constraint]: + raise NotImplementedError() + + +def _generate_tree_expression( + expression: Optional[ExpressionNode], tree: TreeNode +) -> ExpressionNode: + raise NotImplementedError() + + +def _generate_tree_port_field_definition( + port_field_definition: 
Dict[PortFieldId, PortFieldDefinition], tree: TreeNode +) -> Iterable[PortFieldDefinition]: + raise NotImplementedError() + + +def _generate_tree_model( + tree_node: TreeNode, component: Component, network_id: str +) -> Model: + variables = _generate_tree_variables( + component.model.variables, + tree_node, + ) + constraints = _generate_tree_constraints(component.model.constraints, tree_node) + binding_constraints = _generate_tree_constraints( + component.model.binding_constraints, tree_node + ) + objective_operational_contribution = _generate_tree_expression( + component.model.objective_operational_contribution, tree_node + ) + objective_investment_contribution = _generate_tree_expression( + component.model.objective_investment_contribution, tree_node + ) + port_fields_definitions = _generate_tree_port_field_definition( + component.model.port_fields_definitions, tree_node + ) + tree_model = model( + id=f"{network_id}_{component.model.id}", + constraints=constraints, + binding_constraints=binding_constraints, + parameters=component.model.parameters.values(), + variables=variables, + objective_operational_contribution=objective_operational_contribution, + objective_investment_contribution=objective_investment_contribution, + inter_block_dyn=component.model.inter_block_dyn, + ports=component.model.ports.values(), + port_fields_definitions=port_fields_definitions, + ) + + return tree_model + + +def _generate_network_on_node(network: Network, tree_node: TreeNode) -> Network: + network_id = tree_node.name + tree_node_network = Network(network_id) + + for component in network.all_components: + tree_node_model = _generate_tree_model( + tree_node, + component, + network_id, + ) + + # It would be nice to have the same treatment for nodes and components as they are actually the same thing... 
+ if isinstance(component, Node): + network_node = Node(tree_node_model, id=f"{network_id}_{component.id}") + tree_node_network.add_node(network_node) + else: + tree_node_component = create_component( + tree_node_model, id=f"{network_id}_{component.id}" + ) + tree_node_network.add_component(tree_node_component) + + for connection in network.connections: + tree_node_network.connect(connection.port1, connection.port2) + return tree_node_network + + +def create_network_on_tree(network: Network, tree: TreeNode) -> Dict[TreeNode, Network]: + # On crée un gros modèle en dupliquant les variables; contraintes, etc à chaque noeud de l'arbre. + # Pour le master on peut : + # - Utiliser uniquement les variables, contraintes, etc dont on va avoir besoin dans la construction du problème -> nécessite déjà d'avoir des infos sur la construction des problèmes alors qu'on agit au niveau modèle ici + # - Dupliquer tout le modèle, permet de mutualiser du code avec la partie composant par noeud et plus lisible. Seul inconvénient, modèle master un peu trop riche, pas besoin des infos "opérationnelles". Mais les modèles ne sont pas très "lourds" donc on peut se le permettre. C'est l'option choisie ici. 
+ if tree.size == 1: + return {tree: network} + else: + node_to_network = {} + for tree_node in LevelOrderIter(tree): + node_to_network[tree_node] = _generate_network_on_node(network, tree_node) + return node_to_network + + +def create_master_network( + tree_node_to_network: Dict[TreeNode, Network], + decision_coupling_model: Optional[Model], +) -> Network: + root = next(iter(tree_node_to_network.keys())).root + return tree_node_to_network[root] diff --git a/src/andromede/simulation/optimization.py b/src/andromede/simulation/optimization.py index 92216a11..60cc318b 100644 --- a/src/andromede/simulation/optimization.py +++ b/src/andromede/simulation/optimization.py @@ -18,7 +18,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum -from typing import Dict, Iterable, List, Optional, Type +from typing import Dict, Iterable, List, Optional import ortools.linear_solver.pywraplp as lp @@ -809,7 +809,6 @@ def export_as_lp(self) -> str: def build_problem( network: Network, database: DataBase, - tree_node_name: str, block: TimeBlock, scenarios: int, *, diff --git a/src/andromede/simulation/time_block.py b/src/andromede/simulation/time_block.py index 3e71116e..195ee40c 100644 --- a/src/andromede/simulation/time_block.py +++ b/src/andromede/simulation/time_block.py @@ -10,10 +10,8 @@ # # This file is part of the Antares project. 
-from dataclasses import dataclass, field -from typing import Dict, List, Optional - -from anytree import Node as TreeNode +from dataclasses import dataclass +from typing import List, Optional # TODO: Move keys elsewhere as variables have no sense in this file @@ -40,15 +38,3 @@ class TimeBlock: id: int timesteps: List[int] - - -@dataclass(frozen=True) -class InterDecisionTimeScenarioConfig: - blocks: List[TimeBlock] - scenarios: int - - -@dataclass(frozen=True) -class ConfiguredTree: - root: TreeNode # Could be retrieved easily from any node with node.root, but clearer to identify it separately - node_to_config: Dict[TreeNode, InterDecisionTimeScenarioConfig] diff --git a/src/andromede/study/data.py b/src/andromede/study/data.py index 3b294930..d1e475f4 100644 --- a/src/andromede/study/data.py +++ b/src/andromede/study/data.py @@ -35,7 +35,6 @@ class ScenarioIndex: @dataclass(frozen=True) class AbstractDataStructure(ABC): - @abstractmethod def get_value( self, timestep: int, scenario: int, node_id: Optional[int] = None @@ -81,7 +80,7 @@ class TimeSeriesData(AbstractDataStructure): can be defined by referencing one of those timeseries by its ID. """ - time_series: Dict[TimeIndex, float] + time_series: Mapping[TimeIndex, float] def get_value( self, timestep: int, scenario: int, node_id: Optional[int] = None @@ -103,7 +102,7 @@ class ScenarioSeriesData(AbstractDataStructure): can be defined by referencing one of those timeseries by its ID. """ - scenario_series: Dict[ScenarioIndex, float] + scenario_series: Mapping[ScenarioIndex, float] def get_value( self, timestep: int, scenario: int, node_id: Optional[int] = None @@ -125,7 +124,7 @@ class TimeScenarioSeriesData(AbstractDataStructure): can be defined by referencing one of those timeseries by its ID. 
""" - time_scenario_series: Dict[TimeScenarioIndex, float] + time_scenario_series: Mapping[TimeScenarioIndex, float] def get_value( self, timestep: int, scenario: int, node_id: Optional[int] = None diff --git a/tests/andromede/test_investment_pathway.py b/tests/andromede/test_investment_pathway.py index 3e740c37..1b7b0052 100644 --- a/tests/andromede/test_investment_pathway.py +++ b/tests/andromede/test_investment_pathway.py @@ -19,12 +19,14 @@ NODE_WITH_SPILL_AND_ENS, THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, ) +from andromede.model.model import model from andromede.simulation.benders_decomposed import build_benders_decomposed_problem -from andromede.simulation.time_block import ( +from andromede.simulation.decision_tree import ( ConfiguredTree, InterDecisionTimeScenarioConfig, - TimeBlock, + create_network_on_tree, ) +from andromede.simulation.time_block import TimeBlock from andromede.study.data import ConstantData, DataBase, TreeData from andromede.study.network import Component, Network, Node, PortRef, create_component @@ -197,7 +199,6 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( new_base = TreeNode("2040_new_base", parent=root) no_base = TreeNode("2040_no_base", parent=root) configured_tree = ConfiguredTree( - root, { root: time_scenario_config, new_base: time_scenario_config, @@ -205,8 +206,15 @@ def test_investment_pathway_on_a_tree_with_one_root_two_children( }, ) + decision_coupling_model = model("DECISION_COUPLING") + + tree_node_to_network = create_network_on_tree(network, configured_tree.root) + problems = build_benders_decomposed_problem( - network, database, configured_tree + tree_node_to_network, + database, + configured_tree, + decision_coupling_model=decision_coupling_model, ) # Réfléchir à la représentation des variables dans l'arbre diff --git a/tests/functional/test_xpansion.py b/tests/functional/test_xpansion.py index 6ace4433..d02b4e74 100644 --- a/tests/functional/test_xpansion.py +++ 
b/tests/functional/test_xpansion.py @@ -41,6 +41,10 @@ build_problem, ) from andromede.simulation.benders_decomposed import build_benders_decomposed_problem +from andromede.simulation.decision_tree import ( + create_network_on_tree, + create_single_node_decision_tree, +) from andromede.study import ( Component, ConstantData, @@ -62,9 +66,9 @@ @pytest.fixture -def wind_cluster_candidate() -> Model: - WIND_CLUSTER_CANDIDATE = model( - id="WIND_CLUSTER", +def discrete_candidate() -> Model: + DISCRETE_CANDIDATE = model( + id="DISCRETE", parameters=[ float_parameter("op_cost", CONSTANT), float_parameter("invest_cost", CONSTANT), @@ -242,9 +246,9 @@ def test_two_candidates_xpansion_single_time_step_single_scenario( database.add_data("CAND", "invest_cost", ConstantData(490)) database.add_data("CAND", "max_invest", ConstantData(1000)) - database.add_data("CLUSTER", "op_cost", ConstantData(10)) - database.add_data("CLUSTER", "invest_cost", ConstantData(200)) - database.add_data("CLUSTER", "p_max_per_unit", ConstantData(10)) + database.add_data("DISCRETE", "op_cost", ConstantData(10)) + database.add_data("DISCRETE", "invest_cost", ConstantData(200)) + database.add_data("DISCRETE", "p_max_per_unit", ConstantData(10)) node = Node(model=NODE_BALANCE_MODEL, id="N") network = Network("test") @@ -306,9 +310,9 @@ def test_model_export_xpansion_single_time_step_single_scenario( database.add_data("CAND", "invest_cost", ConstantData(490)) database.add_data("CAND", "max_invest", ConstantData(1000)) - database.add_data("CLUSTER", "op_cost", ConstantData(10)) - database.add_data("CLUSTER", "invest_cost", ConstantData(200)) - database.add_data("CLUSTER", "p_max_per_unit", ConstantData(10)) + database.add_data("DISCRETE", "op_cost", ConstantData(10)) + database.add_data("DISCRETE", "invest_cost", ConstantData(200)) + database.add_data("DISCRETE", "p_max_per_unit", ConstantData(10)) node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") network = Network("test") @@ -323,10 +327,15 @@ def 
test_model_export_xpansion_single_time_step_single_scenario( network.connect( PortRef(cluster_candidate, "balance_port"), PortRef(node, "balance_port") ) + + blocks = [TimeBlock(1, [0])] scenarios = 1 + configured_tree = create_single_node_decision_tree(blocks, scenarios) + tree_node_to_network = create_network_on_tree(network, configured_tree.root) + xpansion = build_benders_decomposed_problem( - network, database, TimeBlock(1, [0]), scenarios + tree_node_to_network, database, configured_tree ) assert xpansion.run() From 5a25d3a152b2efa39bd2a192581fb40ee61c17d8 Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Wed, 28 Feb 2024 18:35:54 +0100 Subject: [PATCH 06/12] Work in progress --- src/andromede/simulation/decision_tree.py | 19 +++++----- .../test_generate_network_on_tree.py | 35 +++++++++++++++++++ 2 files changed, 43 insertions(+), 11 deletions(-) create mode 100644 tests/andromede/test_generate_network_on_tree.py diff --git a/src/andromede/simulation/decision_tree.py b/src/andromede/simulation/decision_tree.py index 9a42af66..d2769531 100644 --- a/src/andromede/simulation/decision_tree.py +++ b/src/andromede/simulation/decision_tree.py @@ -61,6 +61,7 @@ def _generate_tree_variables( ) -> Iterable[Variable]: tree_variables = [] for variable in variables.values(): + # Works as we do not allow variables in bounds, hence no problem to copy the corresponding expression nodes as is. If we had variables, we would have to replace the variable names by the ones with tree node information. 
tree_variables.append( dataclasses.replace(variable, name=f"{tree_node.name}_{variable.name}") ) @@ -86,7 +87,8 @@ def _generate_tree_port_field_definition( def _generate_tree_model( - tree_node: TreeNode, component: Component, network_id: str + tree_node: TreeNode, + component: Component, ) -> Model: variables = _generate_tree_variables( component.model.variables, @@ -106,7 +108,7 @@ def _generate_tree_model( component.model.port_fields_definitions, tree_node ) tree_model = model( - id=f"{network_id}_{component.model.id}", + id=f"{tree_node.name}_{component.model.id}", constraints=constraints, binding_constraints=binding_constraints, parameters=component.model.parameters.values(), @@ -122,23 +124,18 @@ def _generate_tree_model( def _generate_network_on_node(network: Network, tree_node: TreeNode) -> Network: - network_id = tree_node.name - tree_node_network = Network(network_id) + tree_node_network = Network(tree_node.name) for component in network.all_components: - tree_node_model = _generate_tree_model( - tree_node, - component, - network_id, - ) + tree_node_model = _generate_tree_model(tree_node, component) # It would be nice to have the same treatment for nodes and components as they are actually the same thing... 
if isinstance(component, Node): - network_node = Node(tree_node_model, id=f"{network_id}_{component.id}") + network_node = Node(tree_node_model, id=f"{tree_node.name}_{component.id}") tree_node_network.add_node(network_node) else: tree_node_component = create_component( - tree_node_model, id=f"{network_id}_{component.id}" + tree_node_model, id=f"{tree_node.name}_{component.id}" ) tree_node_network.add_component(tree_node_component) diff --git a/tests/andromede/test_generate_network_on_tree.py b/tests/andromede/test_generate_network_on_tree.py new file mode 100644 index 00000000..b2e516a6 --- /dev/null +++ b/tests/andromede/test_generate_network_on_tree.py @@ -0,0 +1,35 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. + + +from anytree import Node as TreeNode + +from andromede.libs.standard import THERMAL_CLUSTER_MODEL_HD +from andromede.simulation.decision_tree import _generate_tree_model +from andromede.study.network import create_component + + +def test_generate_model_on_node() -> None: + thermal = create_component(model=THERMAL_CLUSTER_MODEL_HD, id="thermal") + + tree_node_id = "2030" + tree_node_model = _generate_tree_model(TreeNode(tree_node_id), thermal) + + # How to compare model efficiently with only change in name ? 
+ assert tree_node_model.id == f"{tree_node_id}_{thermal.id}" + + for variable in thermal.model.variables.values(): + assert f"{tree_node_id}_{variable.name}" in tree_node_model.variables + + # Create dedicated function + tree_variable = tree_node_model.variables[f"{tree_node_id}_{variable.name}"] + # assert From abed8a0616c37daca5afe5c46b3b8b1d796aa7a6 Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Fri, 8 Mar 2024 08:58:49 +0100 Subject: [PATCH 07/12] Add comments --- src/andromede/simulation/decision_tree.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/andromede/simulation/decision_tree.py b/src/andromede/simulation/decision_tree.py index d2769531..267c71ee 100644 --- a/src/andromede/simulation/decision_tree.py +++ b/src/andromede/simulation/decision_tree.py @@ -69,20 +69,24 @@ def _generate_tree_variables( def _generate_tree_constraints( - constraints: Dict[str, Constraint], tree: TreeNode + constraints: Dict[str, Constraint], tree_node: TreeNode ) -> Iterable[Constraint]: + # Goal is to replace variables in constraint, lower bound and upper bound with node variable raise NotImplementedError() def _generate_tree_expression( - expression: Optional[ExpressionNode], tree: TreeNode + expression: Optional[ExpressionNode], tree_node: TreeNode ) -> ExpressionNode: + # Goal is to replace variables with node variable + # Create a copy visitor to do so raise NotImplementedError() def _generate_tree_port_field_definition( - port_field_definition: Dict[PortFieldId, PortFieldDefinition], tree: TreeNode + port_field_definition: Dict[PortFieldId, PortFieldDefinition], tree_node: TreeNode ) -> Iterable[PortFieldDefinition]: + # Goal is to replace variables in the expression defining the port by node variable raise NotImplementedError() @@ -162,5 +166,7 @@ def create_master_network( tree_node_to_network: Dict[TreeNode, Network], decision_coupling_model: Optional[Model], ) -> Network: + # Current implementation so that tests pass for 
trees with one investment nodes (in test_xpansion) + # The final implementation should gather all networks from tree nodes and connect the models with the decision coupling model (with ports) root = next(iter(tree_node_to_network.keys())).root return tree_node_to_network[root] From 303bd141815d4b27363bff7c7686884a1176cd81 Mon Sep 17 00:00:00 2001 From: Thomas Bittar Date: Wed, 13 Mar 2024 11:54:59 +0100 Subject: [PATCH 08/12] Fix rebase conflict --- .../simulation/benders_decomposed.py | 3 +-- tests/integration/test_benders_decomposed.py | 26 ++++++++++++------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/andromede/simulation/benders_decomposed.py b/src/andromede/simulation/benders_decomposed.py index eb52cf26..e94d968e 100644 --- a/src/andromede/simulation/benders_decomposed.py +++ b/src/andromede/simulation/benders_decomposed.py @@ -37,7 +37,7 @@ InvestmentProblemStrategy, OperationalProblemStrategy, ) -from andromede.simulation.time_block import ConfiguredTree, TimeBlock +from andromede.simulation.time_block import TimeBlock from andromede.study.data import DataBase from andromede.study.network import Network from andromede.utils import read_json, serialize, serialize_json @@ -228,7 +228,6 @@ def build_benders_decomposed_problem( master = build_problem( master_network, database, - configured_tree.root, # Could be any node, given the implmentation of get_nodes() null_time_block := TimeBlock( # Not necessary for master, but list must be non-empty 0, [0] ), diff --git a/tests/integration/test_benders_decomposed.py b/tests/integration/test_benders_decomposed.py index 38cc06b0..a6011161 100644 --- a/tests/integration/test_benders_decomposed.py +++ b/tests/integration/test_benders_decomposed.py @@ -19,8 +19,7 @@ CONSTANT, DEMAND_MODEL, GENERATOR_MODEL, - NODE_BALANCE_MODEL, - NODE_WITH_SPILL_AND_ENS_MODEL, + NODE_WITH_SPILL_AND_ENS, ) from andromede.model import ( Constraint, @@ -38,6 +37,10 @@ TimeBlock, build_benders_decomposed_problem, ) 
+from andromede.simulation.decision_tree import ( + create_network_on_tree, + create_single_node_decision_tree, +) from andromede.study import ( Component, ConstantData, @@ -214,7 +217,7 @@ def test_benders_decomposed_integration( demand = create_component(model=DEMAND_MODEL, id="D") - node = Node(model=NODE_WITH_SPILL_AND_ENS_MODEL, id="N") + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") network = Network("test") network.add_node(node) network.add_component(demand) @@ -228,9 +231,13 @@ def test_benders_decomposed_integration( PortRef(cluster_candidate, "balance_port"), PortRef(node, "balance_port") ) scenarios = 1 + blocks = [TimeBlock(1, [0])] + + configured_tree = create_single_node_decision_tree(blocks, scenarios) + tree_node_to_network = create_network_on_tree(network, configured_tree.root) xpansion = build_benders_decomposed_problem( - network, database, [TimeBlock(1, [0])], scenarios + tree_node_to_network, database, configured_tree ) data = { @@ -305,7 +312,7 @@ def test_benders_decomposed_multi_time_block_single_scenario( id="D", ) - node = Node(model=NODE_WITH_SPILL_AND_ENS_MODEL, id="N") + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") network = Network("test") network.add_node(node) network.add_component(demand) @@ -316,12 +323,13 @@ def test_benders_decomposed_multi_time_block_single_scenario( network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) scenarios = 1 + blocks = [TimeBlock(1, [0]), TimeBlock(2, [1])] + + configured_tree = create_single_node_decision_tree(blocks, scenarios) + tree_node_to_network = create_network_on_tree(network, configured_tree.root) xpansion = build_benders_decomposed_problem( - network, - database, - [TimeBlock(1, [0]), TimeBlock(2, [1])], - scenarios, + tree_node_to_network, database, configured_tree ) data = { From 964297a4ff40282da235b8aed48d6ce70856996f Mon Sep 17 00:00:00 2001 From: ianmnz Date: Mon, 22 Jul 2024 14:48:28 +0200 Subject: [PATCH 09/12] Feature/network cloning (#28) * 
Move pathway tests to functional folder * Add replicate method to classes to be able to copy them to tree * Remove integration test from functional xpansion test * Modify DecisionTree * Move decision_tree test to unit tests folder * Ideas for the coupling_model * Write subproblem with tree * Add first pathway test * Add three node test for pathway * Add prob to tree nodes for expectation computation * Remove test debug init * Add prob to decision tree node * Add ExpectedValueStrategy to Benders * Retour of PR comments --- src/andromede/expression/expression.py | 8 + src/andromede/libs/standard.py | 10 +- src/andromede/model/constraint.py | 56 +- src/andromede/model/model.py | 20 +- src/andromede/model/parameter.py | 6 +- src/andromede/model/variable.py | 7 +- src/andromede/simulation/__init__.py | 1 + .../simulation/benders_decomposed.py | 119 ++-- src/andromede/simulation/decision_tree.py | 199 ++----- src/andromede/simulation/optimization.py | 173 ++++-- src/andromede/simulation/strategy.py | 33 +- src/andromede/study/data.py | 50 +- src/andromede/study/network.py | 48 +- .../test_generate_network_on_tree.py | 35 -- tests/andromede/test_investment_pathway.py | 220 -------- tests/functional/test_investment_pathway.py | 514 ++++++++++++++++++ tests/functional/test_xpansion.py | 62 +-- tests/integration/test_benders_decomposed.py | 28 +- .../test_generate_network_on_tree.py | 101 ++++ tests/unittests/test_model.py | 24 +- 20 files changed, 1049 insertions(+), 665 deletions(-) delete mode 100644 tests/andromede/test_generate_network_on_tree.py delete mode 100644 tests/andromede/test_investment_pathway.py create mode 100644 tests/functional/test_investment_pathway.py create mode 100644 tests/unittests/test_generate_network_on_tree.py diff --git a/src/andromede/expression/expression.py b/src/andromede/expression/expression.py index ffbd4e22..7262326d 100644 --- a/src/andromede/expression/expression.py +++ b/src/andromede/expression/expression.py @@ -222,6 +222,14 @@ 
def literal(value: float) -> LiteralNode: return LiteralNode(value) +def is_unbound(expr: ExpressionNode) -> bool: + return isinstance(expr, LiteralNode) and (abs(expr.value) == float("inf")) + + +def is_non_negative(expr: ExpressionNode) -> bool: + return isinstance(expr, LiteralNode) and (expr.value >= 0) + + @dataclass(frozen=True, eq=False) class UnaryOperatorNode(ExpressionNode): operand: ExpressionNode diff --git a/src/andromede/libs/standard.py b/src/andromede/libs/standard.py index f9028bf8..48e05d7e 100644 --- a/src/andromede/libs/standard.py +++ b/src/andromede/libs/standard.py @@ -441,10 +441,16 @@ float_variable( "invested_capa", lower_bound=literal(0), - upper_bound=param("max_invest"), structure=CONSTANT, context=ProblemContext.COUPLING, ), + float_variable( + "delta_invest", + lower_bound=literal(0), + upper_bound=param("max_invest"), + structure=CONSTANT, + context=ProblemContext.INVESTMENT, + ), ], ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], port_fields_definitions=[ @@ -463,5 +469,5 @@ objective_operational_contribution=(param("op_cost") * var("generation")) .sum() .expec(), - objective_investment_contribution=param("invest_cost") * var("invested_capa"), + objective_investment_contribution=param("invest_cost") * var("delta_invest"), ) diff --git a/src/andromede/model/constraint.py b/src/andromede/model/constraint.py index 9e7c6050..0f33aa7f 100644 --- a/src/andromede/model/constraint.py +++ b/src/andromede/model/constraint.py @@ -10,19 +10,23 @@ # # This file is part of the Antares project. 
-from typing import Optional +from dataclasses import dataclass, field, replace +from typing import Any from andromede.expression.degree import is_constant from andromede.expression.expression import ( Comparator, ComparisonNode, ExpressionNode, + is_non_negative, + is_unbound, literal, ) from andromede.expression.print import print_expr from andromede.model.common import ProblemContext +@dataclass class Constraint: """ A constraint linking variables and parameters of a model together. @@ -32,55 +36,45 @@ class Constraint: name: str expression: ExpressionNode - lower_bound: ExpressionNode - upper_bound: ExpressionNode - context: ProblemContext + lower_bound: ExpressionNode = field(default=literal(-float("inf"))) + upper_bound: ExpressionNode = field(default=literal(float("inf"))) + context: ProblemContext = field(default=ProblemContext.OPERATIONAL) - def __init__( + def __post_init__( self, - name: str, - expression: ExpressionNode, - lower_bound: Optional[ExpressionNode] = None, - upper_bound: Optional[ExpressionNode] = None, - context: ProblemContext = ProblemContext.OPERATIONAL, ) -> None: - self.name = name - self.context = context - - if isinstance(expression, ComparisonNode): - if lower_bound is not None or upper_bound is not None: + if isinstance(self.expression, ComparisonNode): + if not is_unbound(self.lower_bound) or not is_unbound(self.upper_bound): raise ValueError( "Both comparison between two expressions and a bound are specfied, set either only a comparison between expressions or a single linear expression with bounds." 
) - merged_expr = expression.left - expression.right - self.expression = merged_expr - - if expression.comparator == Comparator.LESS_THAN: + if self.expression.comparator == Comparator.LESS_THAN: # lhs - rhs <= 0 self.upper_bound = literal(0) self.lower_bound = literal(-float("inf")) - elif expression.comparator == Comparator.GREATER_THAN: + elif self.expression.comparator == Comparator.GREATER_THAN: # lhs - rhs >= 0 self.lower_bound = literal(0) self.upper_bound = literal(float("inf")) else: # lhs - rhs == 0 self.lower_bound = literal(0) self.upper_bound = literal(0) + + self.expression = self.expression.left - self.expression.right + else: - for bound in [lower_bound, upper_bound]: - if bound is not None and not is_constant(bound): + for bound in [self.lower_bound, self.upper_bound]: + if not is_constant(bound): raise ValueError( f"The bounds of a constraint should not contain variables, {print_expr(bound)} was given." ) - self.expression = expression - if lower_bound is not None: - self.lower_bound = lower_bound - else: - self.lower_bound = literal(-float("inf")) + if is_unbound(self.lower_bound) and is_non_negative(self.lower_bound): + raise ValueError("Lower bound should not be +Inf") - if upper_bound is not None: - self.upper_bound = upper_bound - else: - self.upper_bound = literal(float("inf")) + if is_unbound(self.upper_bound) and not is_non_negative(self.upper_bound): + raise ValueError("Upper bound should not be -Inf") + + def replicate(self, /, **changes: Any) -> "Constraint": + return replace(self, **changes) diff --git a/src/andromede/model/model.py b/src/andromede/model/model.py index 4a38e415..8871b2ab 100644 --- a/src/andromede/model/model.py +++ b/src/andromede/model/model.py @@ -16,11 +16,8 @@ defining parameters, variables, and equations. 
""" import itertools -from dataclasses import dataclass, field -from typing import Dict, Iterable, Optional - -from anytree import LevelOrderIter -from anytree import Node as TreeNode +from dataclasses import dataclass, field, replace +from typing import Any, Dict, Iterable, Optional from andromede.expression import ( AdditionNode, @@ -110,12 +107,18 @@ class ModelPort: port_type: PortType port_name: str + def replicate(self, /, **changes: Any) -> "ModelPort": + return replace(self, **changes) + @dataclass(frozen=True) class PortFieldId: port_name: str field_name: str + def replicate(self, /, **changes: Any) -> "PortFieldId": + return replace(self, **changes) + @dataclass(frozen=True) class PortFieldDefinition: @@ -129,6 +132,9 @@ class PortFieldDefinition: def __post_init__(self) -> None: _validate_port_field_expression(self) + def replicate(self, /, **changes: Any) -> "PortFieldDefinition": + return replace(self, **changes) + def port_field_def( port_name: str, field_name: str, definition: ExpressionNode @@ -186,6 +192,10 @@ def get_all_constraints(self) -> Iterable[Constraint]: self.binding_constraints.values(), self.constraints.values() ) + def replicate(self, /, **changes: Any) -> "Model": + # Shallow copy + return replace(self, **changes) + def model( id: str, diff --git a/src/andromede/model/parameter.py b/src/andromede/model/parameter.py index 745d5a40..fdba78c4 100644 --- a/src/andromede/model/parameter.py +++ b/src/andromede/model/parameter.py @@ -10,7 +10,8 @@ # # This file is part of the Antares project. 
-from dataclasses import dataclass +from dataclasses import dataclass, replace +from typing import Any from andromede.expression.indexing_structure import IndexingStructure from andromede.model.common import ValueType @@ -28,6 +29,9 @@ class Parameter: type: ValueType structure: IndexingStructure + def replicate(self, /, **changes: Any) -> "Parameter": + return replace(self, **changes) + def int_parameter( name: str, diff --git a/src/andromede/model/variable.py b/src/andromede/model/variable.py index 7f65b2c4..ca4ca0cd 100644 --- a/src/andromede/model/variable.py +++ b/src/andromede/model/variable.py @@ -10,8 +10,8 @@ # # This file is part of the Antares project. -from dataclasses import dataclass -from typing import Optional +from dataclasses import dataclass, replace +from typing import Any, Optional from andromede.expression import ExpressionNode from andromede.expression.degree import is_constant @@ -38,6 +38,9 @@ def __post_init__(self) -> None: if self.upper_bound and not is_constant(self.upper_bound): raise ValueError("Upper bounds of variables must be constant") + def replicate(self, /, **changes: Any) -> "Variable": + return replace(self, **changes) + def int_variable( name: str, diff --git a/src/andromede/simulation/__init__.py b/src/andromede/simulation/__init__.py index 23e0a855..46501a29 100644 --- a/src/andromede/simulation/__init__.py +++ b/src/andromede/simulation/__init__.py @@ -14,6 +14,7 @@ BendersDecomposedProblem, build_benders_decomposed_problem, ) +from .decision_tree import DecisionTreeNode, InterDecisionTimeScenarioConfig from .optimization import BlockBorderManagement, OptimizationProblem, build_problem from .output_values import BendersSolution, OutputValues from .runner import BendersRunner, MergeMPSRunner diff --git a/src/andromede/simulation/benders_decomposed.py b/src/andromede/simulation/benders_decomposed.py index e94d968e..57a03ff4 100644 --- a/src/andromede/simulation/benders_decomposed.py +++ 
b/src/andromede/simulation/benders_decomposed.py @@ -18,14 +18,12 @@ import pathlib from typing import Any, Dict, List, Optional -from anytree import Node as TreeNode - -from andromede.model.model import Model -from andromede.simulation.decision_tree import ConfiguredTree, create_master_network +from andromede.simulation.decision_tree import DecisionTreeNode from andromede.simulation.optimization import ( BlockBorderManagement, OptimizationProblem, build_problem, + fusion_problems, ) from andromede.simulation.output_values import ( BendersDecomposedSolution, @@ -34,6 +32,7 @@ ) from andromede.simulation.runner import BendersRunner, MergeMPSRunner from andromede.simulation.strategy import ( + ExpectedValue, InvestmentProblemStrategy, OperationalProblemStrategy, ) @@ -53,6 +52,7 @@ class BendersDecomposedProblem: emplacement: pathlib.Path output_path: pathlib.Path + structure_filename: str solution: Optional[BendersSolution] is_merged: bool @@ -63,12 +63,14 @@ def __init__( subproblems: List[OptimizationProblem], emplacement: str = "outputs/lp", output_path: str = "expansion", + struct_filename: str = "structure.txt", ) -> None: self.master = master self.subproblems = subproblems self.emplacement = pathlib.Path(emplacement) self.output_path = pathlib.Path(output_path) + self.structure_filename = struct_filename self.solution = None self.is_merged = False @@ -79,7 +81,6 @@ def export_structure(self) -> str: """ if not self.subproblems: - # TODO For now, only one master and one subproblem raise RuntimeError("Subproblem list must have at least one sub problem") # A mapping similar to the Xpansion mapping for keeping track of variable indexes @@ -89,11 +90,10 @@ def export_structure(self) -> str: problem_to_candidates["master"] = {} for solver_var_info in self.master.context._solver_variables.values(): - if solver_var_info.is_in_objective: - problem_to_candidates["master"][ - solver_var_info.name - ] = solver_var_info.column_id - candidates.add(solver_var_info.name) + 
problem_to_candidates["master"][ + solver_var_info.name + ] = solver_var_info.column_id + candidates.add(solver_var_info.name) for problem in self.subproblems: problem_to_candidates[problem.name] = {} @@ -126,8 +126,8 @@ def export_options( "TRACE": True, "SLAVE_WEIGHT": "CONSTANT", "SLAVE_WEIGHT_VALUE": 1, - "MASTER_NAME": "master", - "STRUCTURE_FILE": "structure.txt", + "MASTER_NAME": f"{self.master.name}", + "STRUCTURE_FILE": f"{self.structure_filename}", "INPUTROOT": ".", "CSV_NAME": "benders_output_trace", "BOUND_ALPHA": True, @@ -144,19 +144,23 @@ def export_options( } return options_values - def prepare( + def initialise( self, *, solver_name: str = "XPRESS", log_level: int = 0, is_debug: bool = False, ) -> None: - serialize("master.mps", self.master.export_as_mps(), self.emplacement) + serialize( + f"{self.master.name}.mps", self.master.export_as_mps(), self.emplacement + ) for subproblem in self.subproblems: serialize( f"{subproblem.name}.mps", subproblem.export_as_mps(), self.emplacement ) - serialize("structure.txt", self.export_structure(), self.emplacement) + serialize( + f"{self.structure_filename}", self.export_structure(), self.emplacement + ) serialize_json( "options.json", self.export_options(solver_name=solver_name, log_level=log_level), @@ -164,7 +168,9 @@ def prepare( ) if is_debug: - serialize("master.lp", self.master.export_as_lp(), self.emplacement) + serialize( + f"{self.master.name}.lp", self.master.export_as_lp(), self.emplacement + ) for subproblem in self.subproblems: serialize( f"{subproblem.name}.lp", subproblem.export_as_lp(), self.emplacement @@ -192,7 +198,7 @@ def run( log_level: int = 0, should_merge: bool = False, ) -> bool: - self.prepare(solver_name=solver_name, log_level=log_level) + self.initialise(solver_name=solver_name, log_level=log_level) if not should_merge: return_code = BendersRunner(self.emplacement).run() @@ -208,13 +214,13 @@ def run( def build_benders_decomposed_problem( - network_on_tree: Dict[TreeNode, 
Network], + decision_tree_root: DecisionTreeNode, database: DataBase, - configured_tree: ConfiguredTree, *, - decision_coupling_model: Optional[Model] = None, border_management: BlockBorderManagement = BlockBorderManagement.CYCLE, solver_id: str = "GLOP", + coupling_network: Network = Network(""), + struct_filename: str = "structure.txt", ) -> BendersDecomposedProblem: """ Entry point to build the xpansion problem for a time period @@ -222,40 +228,65 @@ def build_benders_decomposed_problem( Returns a Benders Decomposed problem """ - master_network = create_master_network(network_on_tree, decision_coupling_model) + if not decision_tree_root.is_leaves_prob_sum_one(): + raise RuntimeError("Decision tree must have leaves' probability sum equal one!") + + null_time_block = TimeBlock( + 0, [0] + ) # Not necessary for master, but list must be non-empty + null_scenario = 0 # Not necessary for master - # Benders Decomposed Master Problem - master = build_problem( - master_network, + coupler = build_problem( + coupling_network, database, - null_time_block := TimeBlock( # Not necessary for master, but list must be non-empty - 0, [0] - ), - null_scenario := 0, # Not necessary for master - problem_name="master", - border_management=border_management, + null_time_block, + null_scenario, + problem_name="coupler", solver_id=solver_id, - problem_strategy=InvestmentProblemStrategy(), + build_strategy=InvestmentProblemStrategy(), + risk_strategy=ExpectedValue(0.0), ) - # Benders Decomposed Sub-problems - subproblems = [] - for ( - tree_node, - time_scenario_config, - ) in configured_tree.node_to_config.items(): - for block in time_scenario_config.blocks: - # Xpansion Sub-problems + masters = [] # Benders Decomposed Master Problem + subproblems = [] # Benders Decomposed Sub-problems + + for tree_node in decision_tree_root.traverse(): + suffix = f"_{tree_node.id}" if decision_tree_root.size > 1 else "" + + masters.append( + build_problem( + tree_node.network, + database, + 
null_time_block, + null_scenario, + problem_name=f"master{suffix}", + solver_id=solver_id, + build_strategy=InvestmentProblemStrategy(), + decision_tree_node=tree_node.id, + risk_strategy=ExpectedValue(tree_node.prob), + ) + ) + + for block in tree_node.config.blocks: + if len(tree_node.config.blocks) > 1: + suffix += f"_t{block.id}" + subproblems.append( build_problem( - network_on_tree[tree_node], + tree_node.network, database, block, - time_scenario_config.scenarios, - problem_name=f"subproblem_{tree_node.name}_{block.id}", + tree_node.config.scenarios, + problem_name=f"subproblem{suffix}", solver_id=solver_id, - problem_strategy=OperationalProblemStrategy(), + build_strategy=OperationalProblemStrategy(), + decision_tree_node=tree_node.id, + risk_strategy=ExpectedValue(tree_node.prob), ) ) - return BendersDecomposedProblem(master, subproblems) + master = fusion_problems(masters, coupler) + + return BendersDecomposedProblem( + master, subproblems, struct_filename=struct_filename + ) diff --git a/src/andromede/simulation/decision_tree.py b/src/andromede/simulation/decision_tree.py index 267c71ee..fc16e144 100644 --- a/src/andromede/simulation/decision_tree.py +++ b/src/andromede/simulation/decision_tree.py @@ -10,19 +10,14 @@ # # This file is part of the Antares project. 
-import dataclasses -from dataclasses import dataclass, field -from typing import Dict, Iterable, List, Optional +import math +from dataclasses import dataclass +from typing import Generator, Iterable, List, Optional -from anytree import LevelOrderIter -from anytree import Node as TreeNode +from anytree import LevelOrderIter, NodeMixin -from andromede.expression.expression import ExpressionNode -from andromede.model.constraint import Constraint -from andromede.model.model import Model, PortFieldDefinition, PortFieldId, model -from andromede.model.variable import Variable from andromede.simulation.time_block import TimeBlock -from andromede.study.network import Component, Network, Node, create_component +from andromede.study.network import Network @dataclass(frozen=True) @@ -31,142 +26,48 @@ class InterDecisionTimeScenarioConfig: scenarios: int -@dataclass(frozen=True) -class ConfiguredTree: - node_to_config: Dict[TreeNode, InterDecisionTimeScenarioConfig] - root: TreeNode = field(init=False) - - def __post_init__(self) -> None: - # Stores the root, by getting it from any tree node - object.__setattr__(self, "root", next(iter(self.node_to_config.keys())).root) - - -def create_single_node_decision_tree( - blocks: List[TimeBlock], scenarios: int -) -> ConfiguredTree: - time_scenario_config = InterDecisionTimeScenarioConfig(blocks, scenarios) - - root = TreeNode("root") - configured_tree = ConfiguredTree( - { - root: time_scenario_config, - }, - ) - - return configured_tree - - -def _generate_tree_variables( - variables: Dict[str, Variable], tree_node: TreeNode -) -> Iterable[Variable]: - tree_variables = [] - for variable in variables.values(): - # Works as we do not allow variables in bounds, hence no problem to copy the corresponding expression nodes as is. If we had variables, we would have to replace the variable names by the ones with tree node information. 
- tree_variables.append( - dataclasses.replace(variable, name=f"{tree_node.name}_{variable.name}") - ) - return tree_variables - - -def _generate_tree_constraints( - constraints: Dict[str, Constraint], tree_node: TreeNode -) -> Iterable[Constraint]: - # Goal is to replace variables in constraint, lower bound and upper bound with node variable - raise NotImplementedError() - - -def _generate_tree_expression( - expression: Optional[ExpressionNode], tree_node: TreeNode -) -> ExpressionNode: - # Goal is to replace variables with node variable - # Create a copy visitor to do so - raise NotImplementedError() - - -def _generate_tree_port_field_definition( - port_field_definition: Dict[PortFieldId, PortFieldDefinition], tree_node: TreeNode -) -> Iterable[PortFieldDefinition]: - # Goal is to replace variables in the expression defining the port by node variable - raise NotImplementedError() - - -def _generate_tree_model( - tree_node: TreeNode, - component: Component, -) -> Model: - variables = _generate_tree_variables( - component.model.variables, - tree_node, - ) - constraints = _generate_tree_constraints(component.model.constraints, tree_node) - binding_constraints = _generate_tree_constraints( - component.model.binding_constraints, tree_node - ) - objective_operational_contribution = _generate_tree_expression( - component.model.objective_operational_contribution, tree_node - ) - objective_investment_contribution = _generate_tree_expression( - component.model.objective_investment_contribution, tree_node - ) - port_fields_definitions = _generate_tree_port_field_definition( - component.model.port_fields_definitions, tree_node - ) - tree_model = model( - id=f"{tree_node.name}_{component.model.id}", - constraints=constraints, - binding_constraints=binding_constraints, - parameters=component.model.parameters.values(), - variables=variables, - objective_operational_contribution=objective_operational_contribution, - 
objective_investment_contribution=objective_investment_contribution, - inter_block_dyn=component.model.inter_block_dyn, - ports=component.model.ports.values(), - port_fields_definitions=port_fields_definitions, - ) - - return tree_model - - -def _generate_network_on_node(network: Network, tree_node: TreeNode) -> Network: - tree_node_network = Network(tree_node.name) - - for component in network.all_components: - tree_node_model = _generate_tree_model(tree_node, component) - - # It would be nice to have the same treatment for nodes and components as they are actually the same thing... - if isinstance(component, Node): - network_node = Node(tree_node_model, id=f"{tree_node.name}_{component.id}") - tree_node_network.add_node(network_node) - else: - tree_node_component = create_component( - tree_node_model, id=f"{tree_node.name}_{component.id}" - ) - tree_node_network.add_component(tree_node_component) - - for connection in network.connections: - tree_node_network.connect(connection.port1, connection.port2) - return tree_node_network - - -def create_network_on_tree(network: Network, tree: TreeNode) -> Dict[TreeNode, Network]: - # On crée un gros modèle en dupliquant les variables; contraintes, etc à chaque noeud de l'arbre. - # Pour le master on peut : - # - Utiliser uniquement les variables, contraintes, etc dont on va avoir besoin dans la construction du problème -> nécessite déjà d'avoir des infos sur la construction des problèmes alors qu'on agit au niveau modèle ici - # - Dupliquer tout le modèle, permet de mutualiser du code avec la partie composant par noeud et plus lisible. Seul inconvénient, modèle master un peu trop riche, pas besoin des infos "opérationnelles". Mais les modèles ne sont pas très "lourds" donc on peut se le permettre. C'est l'option choisie ici. 
- if tree.size == 1: - return {tree: network} - else: - node_to_network = {} - for tree_node in LevelOrderIter(tree): - node_to_network[tree_node] = _generate_network_on_node(network, tree_node) - return node_to_network - - -def create_master_network( - tree_node_to_network: Dict[TreeNode, Network], - decision_coupling_model: Optional[Model], -) -> Network: - # Current implementation so that tests pass for trees with one investment nodes (in test_xpansion) - # The final implementation should gather all networks from tree nodes and connect the models with the decision coupling model (with ports) - root = next(iter(tree_node_to_network.keys())).root - return tree_node_to_network[root] +class DecisionTreeNode(NodeMixin): + id: str + config: InterDecisionTimeScenarioConfig + network: Network + prob: float + + def __init__( + self, + id: str, + config: InterDecisionTimeScenarioConfig, + network: Network = Network(""), + parent: Optional["DecisionTreeNode"] = None, + children: Optional[Iterable["DecisionTreeNode"]] = None, + prob: float = 1.0, + ) -> None: + self.id = id + self.config = config + self.network = network + self.parent = parent + + if prob < 0 or 1 < prob: + raise ValueError("Probability must be a value in the range [0, 1]") + + self.prob = prob * (parent.prob if parent is not None else 1) + if children: + self.children = children + + def traverse( + self, depth: Optional[int] = None + ) -> Generator["DecisionTreeNode", None, None]: + yield from LevelOrderIter(self, maxlevel=depth) + + def is_leaves_prob_sum_one(self) -> bool: + if not self.children: + return True + + # Since we multiply the child's prob by the parent's prob + # in the constructor, the sum of the children prob should + # equal 1 * parent.prob if the values were set correctly + if not math.isclose(self.prob, sum(child.prob for child in self.children)): + return False + + # Recursively check if child nodes have their children's + # probability sum equal to one + return 
all(child.is_leaves_prob_sum_one() for child in self.children) diff --git a/src/andromede/simulation/optimization.py b/src/andromede/simulation/optimization.py index 60cc318b..b53d71c7 100644 --- a/src/andromede/simulation/optimization.py +++ b/src/andromede/simulation/optimization.py @@ -40,7 +40,12 @@ from andromede.model.model import PortFieldId from andromede.simulation.linear_expression import LinearExpression, Term from andromede.simulation.linearize import linearize_expression -from andromede.simulation.strategy import MergedProblemStrategy, ModelSelectionStrategy +from andromede.simulation.strategy import ( + MergedProblemStrategy, + ModelSelectionStrategy, + RiskManagementStrategy, + UniformRisk, +) from andromede.simulation.time_block import TimeBlock from andromede.study.data import DataBase from andromede.study.network import Component, Network @@ -68,22 +73,7 @@ def _get_parameter_value( ) -> float: data = context.database.get_data(component_id, name) absolute_timestep = context.block_timestep_to_absolute_timestep(block_timestep) - return data.get_value(absolute_timestep, scenario) - - -# TODO: Maybe add the notion of constant parameter in the model -# TODO : And constant over scenarios ? 
-def _parameter_is_constant_over_time( - component: Component, - name: str, - context: "OptimizationContext", - block_timestep: int, - scenario: int, -) -> bool: - data = context.database.get_data(component.id, name) - return data.get_value(block_timestep, scenario) == IndexingStructure( - time=False, scenario=False - ) + return data.get_value(absolute_timestep, scenario, context.tree_node) class TimestepValueProvider(ABC): @@ -304,14 +294,21 @@ def __init__( block: TimeBlock, scenarios: int, border_management: BlockBorderManagement, + build_strategy: ModelSelectionStrategy = MergedProblemStrategy(), + risk_strategy: RiskManagementStrategy = UniformRisk(), + decision_tree_node: str = "", ): self._network = network self._database = database self._block = block self._scenarios = scenarios self._border_management = border_management + self._build_strategy = build_strategy + self._risk_strategy = risk_strategy + self._tree_node = decision_tree_node + self._component_variables: Dict[TimestepComponentVariableKey, lp.Variable] = {} - self._solver_variables: Dict[lp.Variable, SolverVariableInfo] = {} + self._solver_variables: Dict[str, SolverVariableInfo] = {} self._connection_fields_expressions: Dict[ PortFieldKey, List[ExpressionNode] ] = {} @@ -324,6 +321,18 @@ def network(self) -> Network: def scenarios(self) -> int: return self._scenarios + @property + def tree_node(self) -> str: + return self._tree_node + + @property + def build_strategy(self) -> ModelSelectionStrategy: + return self._build_strategy + + @property + def risk_strategy(self) -> RiskManagementStrategy: + return self._risk_strategy + def block_length(self) -> int: return len(self._block.timesteps) @@ -384,19 +393,19 @@ def register_component_variable( block_timestep: int, scenario: int, component_id: str, - variable_name: str, + model_var_name: str, variable: lp.Variable, ) -> None: key = TimestepComponentVariableKey( - component_id, variable_name, block_timestep, scenario + component_id, model_var_name, 
block_timestep, scenario ) if key not in self._component_variables: - self._solver_variables[variable] = SolverVariableInfo( + self._solver_variables[variable.name()] = SolverVariableInfo( variable.name(), len(self._solver_variables), False ) self._component_variables[key] = variable - def get_component_context(self, component: Component) -> ComponentContext: + def create_component_context(self, component: Component) -> ComponentContext: return ComponentContext(self, component) def register_connection_fields_expressions( @@ -523,7 +532,7 @@ def _create_objective( ) for solver_var in solver_vars: - opt_context._solver_variables[solver_var].is_in_objective = True + opt_context._solver_variables[solver_var.name()].is_in_objective = True obj.SetCoefficient( solver_var, obj.GetCoefficient(solver_var) + weight * term.coefficient, @@ -667,19 +676,16 @@ class OptimizationProblem: name: str solver: lp.Solver context: OptimizationContext - strategy: ModelSelectionStrategy def __init__( self, name: str, solver: lp.Solver, opt_context: OptimizationContext, - build_strategy: ModelSelectionStrategy = MergedProblemStrategy(), ) -> None: self.name = name self.solver = solver self.context = opt_context - self.strategy = build_strategy self._register_connection_fields_definitions() self._create_variables() @@ -716,10 +722,10 @@ def _register_connection_fields_definitions(self) -> None: def _create_variables(self) -> None: for component in self.context.network.all_components: - component_context = self.context.get_component_context(component) + component_context = self.context.create_component_context(component) model = component.model - for model_var in self.strategy.get_variables(model): + for model_var in self.context.build_strategy.get_variables(model): var_indexing = IndexingStructure( model_var.structure.time, model_var.structure.scenario ) @@ -733,7 +739,21 @@ def _create_variables(self) -> None: instantiated_ub_expr = _instantiate_model_expression( model_var.upper_bound, 
component.id, self.context ) + + # Set solver var name + # Externally, for the Solver, this variable will have a full name + # Internally, it will be indexed by a structure that takes into account + # the component id, variable name, timestep and scenario separately + solver_var_name: str = f"{model_var.name}" + if component.id: + solver_var_name = f"{component.id}_{solver_var_name}" + if self.context.tree_node: + solver_var_name = f"{self.context.tree_node}_{solver_var_name}" + for block_timestep in self.context.get_time_indices(var_indexing): + if self.context.block_length() > 1: + solver_var_name = f"{solver_var_name}_t{block_timestep}" + for scenario in self.context.get_scenario_indices(var_indexing): lower_bound = -self.solver.infinity() upper_bound = self.solver.infinity() @@ -746,14 +766,14 @@ def _create_variables(self) -> None: instantiated_ub_expr ).get_value(block_timestep, scenario) + if self.context.scenarios > 1: + solver_var_name = f"{solver_var_name}_s{scenario}" + # TODO: Add BoolVar or IntVar if the variable is specified to be integer or bool - # Externally, for the Solver, this variable will have a full name - # Internally, it will be indexed by a structure that into account - # the component id, variable name, timestep and scenario separately solver_var = self.solver.NumVar( lower_bound, upper_bound, - f"{component.id}_{model_var.name}_t{block_timestep}_s{scenario}", + solver_var_name, ) component_context.add_variable( block_timestep, scenario, model_var.name, solver_var @@ -761,7 +781,9 @@ def _create_variables(self) -> None: def _create_constraints(self) -> None: for component in self.context.network.all_components: - for constraint in self.strategy.get_constraints(component.model): + for constraint in self.context.build_strategy.get_constraints( + component.model + ): instantiated_expr = _instantiate_model_expression( constraint.expression, component.id, self.context ) @@ -780,23 +802,23 @@ def _create_constraints(self) -> None: ) 
_create_constraint( self.solver, - self.context.get_component_context(component), + self.context.create_component_context(component), instantiated_constraint, ) def _create_objectives(self) -> None: for component in self.context.network.all_components: - component_context = self.context.get_component_context(component) + component_context = self.context.create_component_context(component) model = component.model - for objective in self.strategy.get_objectives(model): + for objective in self.context.build_strategy.get_objectives(model): if objective is not None: _create_objective( self.solver, self.context, component, component_context, - objective, + self.context.risk_strategy(objective), ) def export_as_mps(self) -> str: @@ -815,7 +837,9 @@ def build_problem( problem_name: str = "optimization_problem", border_management: BlockBorderManagement = BlockBorderManagement.CYCLE, solver_id: str = "GLOP", - problem_strategy: ModelSelectionStrategy = MergedProblemStrategy(), + build_strategy: ModelSelectionStrategy = MergedProblemStrategy(), + risk_strategy: RiskManagementStrategy = UniformRisk(), + decision_tree_node: str = "", ) -> OptimizationProblem: """ Entry point to build the optimization problem for a time period. @@ -825,7 +849,76 @@ def build_problem( database.requirements_consistency(network) opt_context = OptimizationContext( - network, database, block, scenarios, border_management + network, + database, + block, + scenarios, + border_management, + build_strategy, + risk_strategy, + decision_tree_node, ) - return OptimizationProblem(problem_name, solver, opt_context, problem_strategy) + return OptimizationProblem(problem_name, solver, opt_context) + + +def fusion_problems( + masters: List[OptimizationProblem], coupler: OptimizationProblem +) -> OptimizationProblem: + if len(masters) == 1: + # Nothing to fusion. 
Just pass down the master
+        return masters[0]
+
+    root_master = coupler
+    root_master.name = "master"
+
+    root_vars: Dict[str, lp.Variable] = dict()
+    root_constraints: Dict[str, lp.Constraint] = dict()
+    root_objective = root_master.solver.Objective()
+
+    # We store the coupler's variables to check for
+    # same name variables in the masters
+    for var in root_master.solver.variables():
+        root_vars[var.name()] = var
+
+    for master in masters:
+        context = master.context
+        objective = master.solver.Objective()
+
+        for var in master.solver.variables():
+            # If variable not already in coupler, we add it
+            # Otherwise we update its upper and lower bounds
+            if var.name() not in root_vars:
+                root_var = root_master.solver.NumVar(var.lb(), var.ub(), var.name())
+                root_master.context._solver_variables[var.name()] = SolverVariableInfo(
+                    var.name(),
+                    len(root_master.context._solver_variables),
+                    context._solver_variables[var.name()].is_in_objective,
+                )
+            else:
+                root_var = root_vars[var.name()]
+                root_var.SetLb(var.lb())
+                root_var.SetUb(var.ub())
+                root_master.context._solver_variables[
+                    var.name()
+                ].is_in_objective = context._solver_variables[
+                    var.name()
+                ].is_in_objective
+
+            for cstr in master.solver.constraints():
+                coeff = cstr.GetCoefficient(var)
+                # If variable present in constraint, we add the constraint to root
+                if coeff != 0:
+                    key = f"{master.name}_{cstr.name()}"
+                    if key not in root_constraints:
+                        root_constraints[key] = root_master.solver.Constraint(
+                            cstr.Lb(), cstr.Ub(), key
+                        )
+                    root_cstr = root_constraints[key]
+                    root_cstr.SetCoefficient(root_var, coeff)
+
+            obj_coeff = objective.GetCoefficient(var)
+            if obj_coeff != 0:
+                root_objective.SetCoefficient(root_var, obj_coeff)
+
+    return root_master
diff --git a/src/andromede/simulation/strategy.py b/src/andromede/simulation/strategy.py
index 75e34c65..645ea646 100644
--- a/src/andromede/simulation/strategy.py
+++ b/src/andromede/simulation/strategy.py
@@ -13,7 +13,7 @@ from abc import ABC, 
abstractmethod from typing import Generator, Optional -from andromede.expression import ExpressionNode +from andromede.expression import ExpressionNode, literal from andromede.model import Constraint, Model, ProblemContext, Variable @@ -80,3 +80,34 @@ def get_objectives( self, model: Model ) -> Generator[Optional[ExpressionNode], None, None]: yield model.objective_operational_contribution + + +class RiskManagementStrategy(ABC): + """ + Abstract functor class for risk management + Its derived classes will implement risk measures: + - UniformRisk : The default case. All expressions have the same weight + - ExpectedValue : Computes the product prob * expression + TODO For now, it will only take into account the Expected Value + TODO In the future could have other risk measures? + """ + + def __call__(self, expr: ExpressionNode) -> ExpressionNode: + return self._modify_expression(expr) + + @abstractmethod + def _modify_expression(self, expr: ExpressionNode) -> ExpressionNode: + ... + + +class UniformRisk(RiskManagementStrategy): + def _modify_expression(self, expr: ExpressionNode) -> ExpressionNode: + return expr + + +class ExpectedValue(RiskManagementStrategy): + def __init__(self, prob: float) -> None: + self._prob = prob + + def _modify_expression(self, expr: ExpressionNode) -> ExpressionNode: + return literal(self._prob) * expr diff --git a/src/andromede/study/data.py b/src/andromede/study/data.py index d1e475f4..db4f0135 100644 --- a/src/andromede/study/data.py +++ b/src/andromede/study/data.py @@ -36,11 +36,7 @@ class ScenarioIndex: @dataclass(frozen=True) class AbstractDataStructure(ABC): @abstractmethod - def get_value( - self, timestep: int, scenario: int, node_id: Optional[int] = None - ) -> ( - float - ): # Is it necessary to add node_id as arguement here ? 
Yes if TreeData is to be considered as a child class + def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: """ Get the data value for a given timestep and scenario at a given node Implement this method in subclasses as needed. @@ -60,9 +56,7 @@ def check_requirement(self, time: bool, scenario: bool) -> bool: class ConstantData(AbstractDataStructure): value: float - def get_value( - self, timestep: int, scenario: int, node_id: Optional[int] = None - ) -> float: + def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: return self.value # ConstantData can be used for time varying or constant models @@ -82,9 +76,7 @@ class TimeSeriesData(AbstractDataStructure): time_series: Mapping[TimeIndex, float] - def get_value( - self, timestep: int, scenario: int, node_id: Optional[int] = None - ) -> float: + def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: return self.time_series[TimeIndex(timestep)] def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -104,9 +96,7 @@ class ScenarioSeriesData(AbstractDataStructure): scenario_series: Mapping[ScenarioIndex, float] - def get_value( - self, timestep: int, scenario: int, node_id: Optional[int] = None - ) -> float: + def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: return self.scenario_series[ScenarioIndex(scenario)] def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -126,9 +116,7 @@ class TimeScenarioSeriesData(AbstractDataStructure): time_scenario_series: Mapping[TimeScenarioIndex, float] - def get_value( - self, timestep: int, scenario: int, node_id: Optional[int] = None - ) -> float: + def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: return self.time_scenario_series[TimeScenarioIndex(timestep, scenario)] def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -140,17 +128,9 @@ def check_requirement(self, time: bool, scenario: bool) -> bool: 
@dataclass(frozen=True) class TreeData(AbstractDataStructure): - data: Mapping[int, AbstractDataStructure] - - def get_value( - self, timestep: int, scenario: int, node_id: Optional[int] = None - ) -> float: - if ( - not node_id - ): # TODO : Could we remove the default None argument for node_id ? - raise ValueError( - "A node_id must be specified to retrieve a value in TreeData." - ) + data: Mapping[str, AbstractDataStructure] + + def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: return self.data[node_id].get_value(timestep, scenario) def check_requirement(self, time: bool, scenario: bool) -> bool: @@ -161,7 +141,7 @@ def check_requirement(self, time: bool, scenario: bool) -> bool: @dataclass(frozen=True) -class ComponentParameterIndex: +class DatabaseIndex: component_id: str parameter_name: str @@ -174,22 +154,20 @@ class DataBase: Data can have different structure : constant, varying in time or scenarios. """ - _data: Dict[ComponentParameterIndex, AbstractDataStructure] + _data: Dict[DatabaseIndex, AbstractDataStructure] def __init__(self) -> None: - self._data: Dict[ComponentParameterIndex, AbstractDataStructure] = {} + self._data: Dict[DatabaseIndex, AbstractDataStructure] = {} def get_data(self, component_id: str, parameter_name: str) -> AbstractDataStructure: - return self._data[ComponentParameterIndex(component_id, parameter_name)] + return self._data[DatabaseIndex(component_id, parameter_name)] def add_data( self, component_id: str, parameter_name: str, data: AbstractDataStructure ) -> None: - self._data[ComponentParameterIndex(component_id, parameter_name)] = data + self._data[DatabaseIndex(component_id, parameter_name)] = data - def get_value( - self, index: ComponentParameterIndex, timestep: int, scenario: int - ) -> float: + def get_value(self, index: DatabaseIndex, timestep: int, scenario: int) -> float: if index in self._data: return self._data[index].get_value(timestep, scenario) else: diff --git 
a/src/andromede/study/network.py b/src/andromede/study/network.py index 44b77974..4bf0c5d1 100644 --- a/src/andromede/study/network.py +++ b/src/andromede/study/network.py @@ -15,8 +15,8 @@ including nodes, links, and components (model instantations). """ import itertools -from dataclasses import dataclass -from typing import Dict, Iterable, List +from dataclasses import dataclass, field, replace +from typing import Any, Dict, Iterable, List, cast from andromede.model import PortField, PortType from andromede.model.model import Model, PortFieldId @@ -32,6 +32,9 @@ class Component: model: Model id: str + def replicate(self, /, **changes: Any) -> "Component": + return replace(self, **changes) + def create_component(model: Model, id: str) -> Component: return Component(model=model, id=id) @@ -46,6 +49,10 @@ class Node(Component): pass +def create_node(model: Model, id: str) -> Node: + return Node(model=model, id=id) + + @dataclass(frozen=True) class PortRef: component: Component @@ -56,12 +63,9 @@ class PortRef: class PortsConnection: port1: PortRef port2: PortRef - master_port: Dict[PortField, PortRef] + master_port: Dict[PortField, PortRef] = field(init=False, default_factory=dict) - def __init__(self, port1: PortRef, port2: PortRef): - self.port1 = port1 - self.port2 = port2 - self.master_port = {} + def __post_init__(self) -> None: self.__validate_ports() def __validate_ports(self) -> None: @@ -106,6 +110,10 @@ def get_port_type(self) -> PortType: raise ValueError(f"Missing port: {port_1}") return port_1.port_type + def replicate(self, /, **changes: Any) -> "PortsConnection": + # Shallow copy + return replace(self, **changes) + @dataclass class Network: @@ -113,11 +121,10 @@ class Network: Network model: simply nodes, links, and components. 
""" - def __init__(self, id: str): - self.id: str = id - self._nodes: Dict[str, Node] = {} - self._components: Dict[str, Component] = {} - self._connections: List[PortsConnection] = [] + id: str + _nodes: Dict[str, Node] = field(init=False, default_factory=dict) + _components: Dict[str, Component] = field(init=False, default_factory=dict) + _connections: List[PortsConnection] = field(init=False, default_factory=list) def _check_node_exists(self, node_id: str) -> None: if node_id not in self._nodes: @@ -162,3 +169,20 @@ def connect(self, port1: PortRef, port2: PortRef) -> None: @property def connections(self) -> Iterable[PortsConnection]: return self._connections + + def get_connection(self, idx: int) -> PortsConnection: + return self._connections[idx] + + def replicate(self, /, **changes: Any) -> "Network": + replica = replace(self, **changes) + + for node in self.nodes: + replica.add_node(cast(Node, node.replicate())) + + for component in self.components: + replica.add_component(component.replicate()) + + for connection in self.connections: + replica._connections.append(connection.replicate()) + + return replica diff --git a/tests/andromede/test_generate_network_on_tree.py b/tests/andromede/test_generate_network_on_tree.py deleted file mode 100644 index b2e516a6..00000000 --- a/tests/andromede/test_generate_network_on_tree.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2024, RTE (https://www.rte-france.com) -# -# See AUTHORS.txt -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# SPDX-License-Identifier: MPL-2.0 -# -# This file is part of the Antares project. 
- - -from anytree import Node as TreeNode - -from andromede.libs.standard import THERMAL_CLUSTER_MODEL_HD -from andromede.simulation.decision_tree import _generate_tree_model -from andromede.study.network import create_component - - -def test_generate_model_on_node() -> None: - thermal = create_component(model=THERMAL_CLUSTER_MODEL_HD, id="thermal") - - tree_node_id = "2030" - tree_node_model = _generate_tree_model(TreeNode(tree_node_id), thermal) - - # How to compare model efficiently with only change in name ? - assert tree_node_model.id == f"{tree_node_id}_{thermal.id}" - - for variable in thermal.model.variables.values(): - assert f"{tree_node_id}_{variable.name}" in tree_node_model.variables - - # Create dedicated function - tree_variable = tree_node_model.variables[f"{tree_node_id}_{variable.name}"] - # assert diff --git a/tests/andromede/test_investment_pathway.py b/tests/andromede/test_investment_pathway.py deleted file mode 100644 index 1b7b0052..00000000 --- a/tests/andromede/test_investment_pathway.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) 2024, RTE (https://www.rte-france.com) -# -# See AUTHORS.txt -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# SPDX-License-Identifier: MPL-2.0 -# -# This file is part of the Antares project. 
- -import pytest -from anytree import Node as TreeNode - -from andromede.libs.standard import ( - DEMAND_MODEL, - GENERATOR_MODEL, - NODE_WITH_SPILL_AND_ENS, - THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, -) -from andromede.model.model import model -from andromede.simulation.benders_decomposed import build_benders_decomposed_problem -from andromede.simulation.decision_tree import ( - ConfiguredTree, - InterDecisionTimeScenarioConfig, - create_network_on_tree, -) -from andromede.simulation.time_block import TimeBlock -from andromede.study.data import ConstantData, DataBase, TreeData -from andromede.study.network import Component, Network, Node, PortRef, create_component - - -@pytest.fixture -def generator() -> Component: - generator = create_component( - model=GENERATOR_MODEL, - id="BASE", - ) - return generator - - -@pytest.fixture -def candidate() -> Component: - candidate = create_component( - model=THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, id="CAND" - ) - return candidate - - -@pytest.fixture -def demand() -> Component: - demand = create_component(model=DEMAND_MODEL, id="D") - return demand - - -@pytest.fixture -def node() -> Node: - node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") - return node - - -def test_investment_pathway_on_a_tree_with_one_root_two_children( - generator: Component, - candidate: Component, - demand: Component, - node: Node, -) -> None: - """ - This use case aims at representing the situation where investment decisions are to be made at different, say "planning times". An actualisation rate can be taken into account. - - The novelty compared the actual usage of planning tools, is that the planning decisions at a given time are taken without knowing exactly which "macro-scenario" / hypothesis on the system that will eventually happen (only knowing the probability distribution of these hypothesis). - - This example models a case where investment decisions have to be made in 2030 and 2040. 
- - In 2030, we have full knowledge of the existing assets - - In 2040, two equiprobable hypothesis are possible : - - A case where there is no change in the generation assets since 2030 (except te potential investment in 2030) - - A case where a base generation unit is present - - When taking the decision in 2030, we do not know which case will occur in 2040 and we seek the best decision given a risk criterion (the expectation here). - - The value of these models lies in the output for the first decision rather than the decisions at the later stages as the first decisions are related to "what we have to do today" ? - - More specifically, to define the use case, we define the following tree representing the system at the different decision times and hypothesis - - 2030 (root node) : - Demand = 300 - Generator : - P_max = 200, - Production cost = 10, - Max investment = 400, - Investment cost = 100 - Unsupplied energy : - Cost = 10000 - - 2040 with new base (scenario 1) : - Demand = 600 - Generator : - P_max = 200, - Production cost = 10, - Max investment = 100, - Investment cost = 50 - Base : - P_max = 200, - Production cost = 5 - Unsupplied energy : - Cost = 10000 - - 2040 no base (scenario 2) : - Demand = 600 - Generator : - P_max = 200, - Production cost = 10, - Max investment = 100, - Investment cost = 50 - Unsupplied energy : - Cost = 10000 - - In the second decision time, demand increases from 300 to 600 in both scenarios. However, investment capacity in the candidate is limited to 100 in the second stage. Investment cost decreases to reflect the effect of a discount rate. - - In case 1, a base unit of capacity 100 has arrived and can produce at the same cost than the candidate. As it is more intersting to invest the latest possible, the optimal solution for scenario 1 is to invest [100, 100]. 
- - In case 2, there is no base unit and the max investment is 100 in the second stage, therefore if we consider scenario 2 only, as unsupplied energy is very expensive, the best investment is [300, 100] - - But here as we solve on the tree, we need to find the best solution in expectation on the set of paths in the tree. - - With initial investment = 100 : - Total cost = [100 x 100 (investment root) + 10 x 300 (prod root)] - + 0.5 (proba child 1) x [100 x 50 (investment child 1) + 10 x 400 (prod generator) + 5 x 200 (prod base)] - + 0.5 (proba child 2) x [100 x 50 (investment child 2) + 10 x 400 (prod generator) + 1000 x 200 (unsupplied energy)] - = 122 500 - - With initial investment = 300 : - Total cost = [100 x 300 (investment root) + 10 x 300 (prod root)] - + 0.5 (proba child 1) x [10 x 400 (prod generator) + 5 x 200 (prod base)] - + 0.5 (proba child 2) x [100 x 50 (investment child 2) + 10 x 600 (prod generator)] - = 41 000 - - As investing less than 300 in the first stage would increase the unsupplied energy and lead to an increase in overall cost (-1 MW invested in 1st stage => + 1 MW unsp energy => +900/MW cost increase more or less), the optimal solution is to invest : - - 300 at first stage - - 0 in child 1 - - 100 in child 2 - - """ - - # Either we duplicate all network for each node : Lots of duplications - # or we index all data, parameters, variables by the resolution node : Make the data struture dependent of the resolution tree... 
- - database = DataBase() - database.add_data("N", "spillage", ConstantData(10)) - database.add_data("N", "unsupplied_energy", ConstantData(10000)) - - database.add_data( - "D", - "demand", - TreeData({0: ConstantData(300), 1: ConstantData(600), 2: ConstantData(600)}), - ) - - database.add_data("CAND", "op_cost", ConstantData(10)) - database.add_data("CAND", "already_installed_capa", ConstantData(200)) - database.add_data( - "CAND", - "invest_cost", - TreeData({0: ConstantData(100), 1: ConstantData(50), 2: ConstantData(50)}), - ) - database.add_data( - "CAND", - "max_invest", - TreeData({0: ConstantData(400), 1: ConstantData(100), 2: ConstantData(100)}), - ) - - database.add_data( - "BASE", - "p_max", - TreeData({0: ConstantData(0), 1: ConstantData(200), 2: ConstantData(0)}), - ) - database.add_data("BASE", "cost", ConstantData(5)) - - # Fonction qui crée les composants / noeud en fonction de l'arbre et du Database initial / modèles + générer les contraintes couplantes temporelles trajectoire + actualisation + - # contraintes industrielles liées à l'arbre ? 
- # Test mode peigne - # Générer le modèle "couplant" - - network = Network("test") - network.add_node(node) - network.add_component(demand) - network.add_component(generator) - network.add_component(candidate) - network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) - - scenarios = 1 - time_scenario_config = InterDecisionTimeScenarioConfig( - [TimeBlock(0, [0])], scenarios - ) - - root = TreeNode("2030") - new_base = TreeNode("2040_new_base", parent=root) - no_base = TreeNode("2040_no_base", parent=root) - configured_tree = ConfiguredTree( - { - root: time_scenario_config, - new_base: time_scenario_config, - no_base: time_scenario_config, - }, - ) - - decision_coupling_model = model("DECISION_COUPLING") - - tree_node_to_network = create_network_on_tree(network, configured_tree.root) - - problems = build_benders_decomposed_problem( - tree_node_to_network, - database, - configured_tree, - decision_coupling_model=decision_coupling_model, - ) - - # Réfléchir à la représentation des variables dans l'arbre diff --git a/tests/functional/test_investment_pathway.py b/tests/functional/test_investment_pathway.py new file mode 100644 index 00000000..d2a25953 --- /dev/null +++ b/tests/functional/test_investment_pathway.py @@ -0,0 +1,514 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+ +import pytest + +from andromede.expression import literal, var +from andromede.expression.indexing_structure import IndexingStructure +from andromede.libs.standard import ( + DEMAND_MODEL, + GENERATOR_MODEL, + NODE_WITH_SPILL_AND_ENS, + THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, +) +from andromede.model.common import ProblemContext +from andromede.model.constraint import Constraint +from andromede.model.model import model +from andromede.model.variable import float_variable +from andromede.simulation import ( + BendersSolution, + TimeBlock, + build_benders_decomposed_problem, +) +from andromede.simulation.decision_tree import ( + DecisionTreeNode, + InterDecisionTimeScenarioConfig, +) +from andromede.study.data import ConstantData, DataBase, TreeData +from andromede.study.network import Component, Network, Node, PortRef, create_component + + +@pytest.fixture +def generator() -> Component: + generator = create_component( + model=GENERATOR_MODEL, + id="BASE", + ) + return generator + + +@pytest.fixture +def candidate() -> Component: + candidate = create_component( + model=THERMAL_CANDIDATE_WITH_ALREADY_INSTALLED_CAPA, id="CAND" + ) + return candidate + + +@pytest.fixture +def demand() -> Component: + demand = create_component(model=DEMAND_MODEL, id="D") + return demand + + +@pytest.fixture +def node() -> Node: + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") + return node + + +def test_investment_pathway_on_sequential_nodes( + node: Node, + demand: Component, + candidate: Component, +) -> None: + """ + A first simple test on the investment pathway + Here, only two nodes are represented, a parent and a child nodes + with probability one of going from parent to child + + The goal here is to show that, in the parent node, the demand is already met + by the existing fixed production. However, for the child node without any new + investment, it would create some unsupplied energy, which is very expensive. 
+ + The investment on the child node, even though enough for the demand, is also + more expensive than on the parent (this should represent a late investment fee). + + To minimize the expected cost in this 2-node tree, one should expect the maximum + investment on the parent node, and the rest on the child node. + + Here below the values used: + + PARENT | CHILD + Demand (MW): 100 200 + Fixed prod (MW): 100 100 + Max invest (MW): 80 100 + Op cost ($): 10 10 + Invest cost ($): 100 300 + ENS cost ($): 10000 10000 + + The solution should be: + + prob | investment | operational + parent: 1 x [ 100 x 80 + 10 x 100 ] + child : + 1 x [ 300 x 20 + 10 x 200 ] + = 17 000 + + """ + # === Populating Database === + database = DataBase() + database.add_data("N", "spillage_cost", ConstantData(10)) + database.add_data("N", "ens_cost", ConstantData(10000)) + + database.add_data( + "D", + "demand", + TreeData( + { + "parent": ConstantData(100), + "child": ConstantData(200), + } + ), + ) + + database.add_data("CAND", "op_cost", ConstantData(10)) + database.add_data("CAND", "already_installed_capa", ConstantData(100)) + + database.add_data( + "CAND", + "invest_cost", + TreeData( + { + "parent": ConstantData(100), + "child": ConstantData(300), + } + ), + ) + + database.add_data( + "CAND", + "max_invest", + TreeData( + { + "parent": ConstantData(80), + "child": ConstantData(100), + } + ), + ) + + # === Coupling model === + # Used between nodes in the decision tree + COUPLING_MODEL = model( + id="COUPLING", + variables=[ + float_variable( + "parent_CAND_delta_invest", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "parent_CAND_invested_capa", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "child_CAND_delta_invest", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + 
context=ProblemContext.INVESTMENT, + ), + float_variable( + "child_CAND_invested_capa", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + ], + constraints=[ + Constraint( + name="Max investment on parent", + expression=var("parent_CAND_invested_capa") + == var("parent_CAND_delta_invest"), + context=ProblemContext.INVESTMENT, + ), + Constraint( + name="Max investment on child", + expression=var("child_CAND_invested_capa") + == var("child_CAND_delta_invest") + var("parent_CAND_invested_capa"), + context=ProblemContext.INVESTMENT, + ), + ], + ) + + # === Network === + network_coupling = Network("coupling_test") + network_coupling.add_component(create_component(model=COUPLING_MODEL, id="")) + + demand_par = demand.replicate() + candidate_par = candidate.replicate() + + network_par = Network("parent_test") + network_par.add_node(node) + network_par.add_component(demand_par) + network_par.add_component(candidate_par) + network_par.connect( + PortRef(demand_par, "balance_port"), PortRef(node, "balance_port") + ) + network_par.connect( + PortRef(candidate_par, "balance_port"), PortRef(node, "balance_port") + ) + + demand_chd = demand.replicate() + candidate_chd = candidate.replicate() + + network_chd = Network("child_test") + network_chd.add_node(node) + network_chd.add_component(demand_chd) + network_chd.add_component(candidate_chd) + network_chd.connect( + PortRef(demand_chd, "balance_port"), PortRef(node, "balance_port") + ) + network_chd.connect( + PortRef(candidate_chd, "balance_port"), PortRef(node, "balance_port") + ) + + # === Decision tree creation === + config = InterDecisionTimeScenarioConfig([TimeBlock(0, [0])], 1) + + decision_tree_par = DecisionTreeNode("parent", config, network_par) + decision_tree_chd = DecisionTreeNode( + "child", config, network_chd, parent=decision_tree_par + ) + + # === Build problem === + xpansion = build_benders_decomposed_problem( + decision_tree_par, database, 
+    The novelty compared to the actual usage of planning tools is that the planning decisions at a given time
+    are taken without knowing exactly which "macro-scenario" / hypothesis on the system will eventually happen
+    (only knowing the probability distribution of these hypotheses).
+    In case 1, a base unit of capacity 100 has arrived and can produce at a smaller cost than the candidate.
+    As it is more advantageous to invest as late as possible, the optimal solution for this scenario is to invest [100, 100].
+ + Case 1 : prob | investment | operational + root: 1 x [ 100 x 100 + 10 x 300 ] + child A: + 0.8 x [ 50 x 100 + 10 x 400 (generator) + 5 x 200 (base)] + child B: + 0.2 x [ 50 x 100 + 10 x 400 (generator) + 10 000 x 200 (unsupplied energy)] + = 422 800 + + Case 2 : prob | investment | operational + root: 1 x [ 100 x 300 + 10 x 300 ] + child A: + 0.8 x [ 50 x 0 + 10 x 400 (generator) + 5 x 200 (base)] + child B: + 0.2 x [ 50 x 100 + 10 x 600 (generator)] + = 39 200 + + As investing less than 300 in the first stage would increase the unsupplied energy and lead to an increase in overall cost + (-1 MW invested in 1st stage => + 1 MW unsupplied energy => +900/MW cost increase more or less), the optimal solution is to invest : + - 300 at first stage + - 0 in child A + - 100 in child B + """ + + database = DataBase() + database.add_data("N", "spillage_cost", ConstantData(10)) + database.add_data("N", "ens_cost", ConstantData(10_000)) + + database.add_data( + "D", + "demand", + TreeData( + { + "root": ConstantData(300), + "childA": ConstantData(600), + "childB": ConstantData(600), + } + ), + ) + + database.add_data("CAND", "op_cost", ConstantData(10)) + database.add_data("CAND", "already_installed_capa", ConstantData(200)) + database.add_data( + "CAND", + "invest_cost", + TreeData( + { + "root": ConstantData(100), + "childA": ConstantData(50), + "childB": ConstantData(50), + } + ), + ) + database.add_data( + "CAND", + "max_invest", + TreeData( + { + "root": ConstantData(400), + "childA": ConstantData(100), + "childB": ConstantData(100), + } + ), + ) + + database.add_data("BASE", "p_max", ConstantData(200)) + database.add_data("BASE", "cost", ConstantData(5)) + + COUPLING_MODEL = model( + id="COUPLING", + variables=[ + float_variable( + "root_CAND_delta_invest", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "root_CAND_invested_capa", + lower_bound=literal(0), + 
structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "childA_CAND_delta_invest", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "childA_CAND_invested_capa", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "childB_CAND_delta_invest", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + float_variable( + "childB_CAND_invested_capa", + lower_bound=literal(0), + structure=IndexingStructure(False, False), + context=ProblemContext.INVESTMENT, + ), + ], + constraints=[ + Constraint( + name="Max investment on root", + expression=var("root_CAND_invested_capa") + == var("root_CAND_delta_invest"), + context=ProblemContext.INVESTMENT, + ), + Constraint( + name="Max investment on child A", + expression=var("childA_CAND_invested_capa") + == var("childA_CAND_delta_invest") + var("root_CAND_invested_capa"), + context=ProblemContext.INVESTMENT, + ), + Constraint( + name="Max investment on child B", + expression=var("childB_CAND_invested_capa") + == var("childB_CAND_delta_invest") + var("root_CAND_invested_capa"), + context=ProblemContext.INVESTMENT, + ), + ], + ) + + network_coupler = Network("coupling_test") + network_coupler.add_component(create_component(model=COUPLING_MODEL, id="")) + + network_root = Network("root_network") + network_root.add_node(node) + network_root.add_component(demand) + network_root.add_component(candidate) + network_root.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network_root.connect( + PortRef(candidate, "balance_port"), PortRef(node, "balance_port") + ) + + network_childA = network_root.replicate(id="childA_network") + network_childA.add_component(generator) + network_childA.connect( + PortRef(generator, "balance_port"), PortRef(node, 
"balance_port") + ) + + network_childB = network_root.replicate(id="childB_network") + + scenarios = 1 + time_scenario_config = InterDecisionTimeScenarioConfig( + [TimeBlock(0, [0])], scenarios + ) + + dt_root = DecisionTreeNode("root", time_scenario_config, network_root) + dt_child_A = DecisionTreeNode( + "childA", time_scenario_config, network_childA, parent=dt_root, prob=0.8 + ) + dt_child_B = DecisionTreeNode( + "childB", time_scenario_config, network_childB, parent=dt_root, prob=0.2 + ) + + xpansion = build_benders_decomposed_problem( + dt_root, database, coupling_network=network_coupler + ) + + data = { + "solution": { + "overall_cost": 39_200, + "values": { + "root_CAND_delta_invest": 300, + "childA_CAND_delta_invest": 0, + "childB_CAND_delta_invest": 100, + "root_CAND_invested_capa": 300, + "childA_CAND_invested_capa": 300, + "childB_CAND_invested_capa": 400, + }, + } + } + solution = BendersSolution(data) + + assert xpansion.run() + decomposed_solution = xpansion.solution + if decomposed_solution is not None: # For mypy only + assert decomposed_solution.is_close( + solution + ), f"Solution differs from expected: {decomposed_solution}" diff --git a/tests/functional/test_xpansion.py b/tests/functional/test_xpansion.py index d02b4e74..b36307b3 100644 --- a/tests/functional/test_xpansion.py +++ b/tests/functional/test_xpansion.py @@ -20,7 +20,6 @@ DEMAND_MODEL, GENERATOR_MODEL, NODE_BALANCE_MODEL, - NODE_WITH_SPILL_AND_ENS, THERMAL_CANDIDATE, ) from andromede.model import ( @@ -40,11 +39,6 @@ TimeBlock, build_problem, ) -from andromede.simulation.benders_decomposed import build_benders_decomposed_problem -from andromede.simulation.decision_tree import ( - create_network_on_tree, - create_single_node_decision_tree, -) from andromede.study import ( Component, ConstantData, @@ -191,7 +185,7 @@ def test_generation_xpansion_single_time_step_single_scenario( database, TimeBlock(1, [0]), scenarios, - problem_strategy=MergedProblemStrategy(), + 
build_strategy=MergedProblemStrategy(), ) status = problem.solver.Solve() @@ -286,60 +280,6 @@ def test_two_candidates_xpansion_single_time_step_single_scenario( assert output == expected_output, f"Output differs from expected: {output}" -def test_model_export_xpansion_single_time_step_single_scenario( - generator: Component, - candidate: Component, - cluster_candidate: Component, - demand: Component, -) -> None: - """ - Same test as before but this time we separate master/subproblem and - export the problems in MPS format to be solved by the Benders solver in Xpansion - """ - - database = DataBase() - database.add_data("D", "demand", ConstantData(400)) - - database.add_data("N", "spillage_cost", ConstantData(1)) - database.add_data("N", "ens_cost", ConstantData(501)) - - database.add_data("G1", "p_max", ConstantData(200)) - database.add_data("G1", "cost", ConstantData(45)) - - database.add_data("CAND", "op_cost", ConstantData(10)) - database.add_data("CAND", "invest_cost", ConstantData(490)) - database.add_data("CAND", "max_invest", ConstantData(1000)) - - database.add_data("DISCRETE", "op_cost", ConstantData(10)) - database.add_data("DISCRETE", "invest_cost", ConstantData(200)) - database.add_data("DISCRETE", "p_max_per_unit", ConstantData(10)) - - node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") - network = Network("test") - network.add_node(node) - network.add_component(demand) - network.add_component(generator) - network.add_component(candidate) - network.add_component(cluster_candidate) - network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) - network.connect( - PortRef(cluster_candidate, "balance_port"), PortRef(node, "balance_port") - ) - - blocks = [TimeBlock(1, [0])] - scenarios = 1 - - configured_tree = create_single_node_decision_tree(blocks, scenarios) - 
tree_node_to_network = create_network_on_tree(network, configured_tree.root) - - xpansion = build_benders_decomposed_problem( - tree_node_to_network, database, configured_tree - ) - assert xpansion.run() - - def test_generation_xpansion_two_time_steps_two_scenarios( generator: Component, candidate: Component, diff --git a/tests/integration/test_benders_decomposed.py b/tests/integration/test_benders_decomposed.py index a6011161..1990029a 100644 --- a/tests/integration/test_benders_decomposed.py +++ b/tests/integration/test_benders_decomposed.py @@ -38,8 +38,8 @@ build_benders_decomposed_problem, ) from andromede.simulation.decision_tree import ( - create_network_on_tree, - create_single_node_decision_tree, + DecisionTreeNode, + InterDecisionTimeScenarioConfig, ) from andromede.study import ( Component, @@ -233,17 +233,19 @@ def test_benders_decomposed_integration( scenarios = 1 blocks = [TimeBlock(1, [0])] - configured_tree = create_single_node_decision_tree(blocks, scenarios) - tree_node_to_network = create_network_on_tree(network, configured_tree.root) + config = InterDecisionTimeScenarioConfig(blocks, scenarios) + decision_tree_root = DecisionTreeNode("", config, network) - xpansion = build_benders_decomposed_problem( - tree_node_to_network, database, configured_tree - ) + xpansion = build_benders_decomposed_problem(decision_tree_root, database) data = { "solution": { "overall_cost": 80_000, - "values": {"CAND_p_max_t0_s0": 100, "DISCRETE_p_max_t0_s0": 100}, + "values": { + "CAND_p_max": 100, + "DISCRETE_p_max": 100, + "DISCRETE_nb_units": 10, + }, } } solution = BendersSolution(data) @@ -325,18 +327,16 @@ def test_benders_decomposed_multi_time_block_single_scenario( scenarios = 1 blocks = [TimeBlock(1, [0]), TimeBlock(2, [1])] - configured_tree = create_single_node_decision_tree(blocks, scenarios) - tree_node_to_network = create_network_on_tree(network, configured_tree.root) + config = InterDecisionTimeScenarioConfig(blocks, scenarios) + decision_tree_root = 
DecisionTreeNode("", config, network) - xpansion = build_benders_decomposed_problem( - tree_node_to_network, database, configured_tree - ) + xpansion = build_benders_decomposed_problem(decision_tree_root, database) data = { "solution": { "overall_cost": 62_000, "values": { - "CAND_p_max_t0_s0": 100, + "CAND_p_max": 100, }, } } diff --git a/tests/unittests/test_generate_network_on_tree.py b/tests/unittests/test_generate_network_on_tree.py new file mode 100644 index 00000000..6ac77b9b --- /dev/null +++ b/tests/unittests/test_generate_network_on_tree.py @@ -0,0 +1,101 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. + +import pytest + +from andromede.simulation import TimeBlock +from andromede.simulation.decision_tree import ( + DecisionTreeNode, + InterDecisionTimeScenarioConfig, +) +from andromede.study.network import Network + + +def test_decision_tree_generation() -> None: + scenarios = 1 + blocks = [TimeBlock(1, [0])] + config = InterDecisionTimeScenarioConfig(blocks, scenarios) + + network = Network("network_id") + root = DecisionTreeNode("root", config, network) + + assert root.id == "root" + assert root.parent is None + assert root.prob == 1.0 + assert not root.children # No children + + child = DecisionTreeNode("child", config, parent=root, prob=0.8) + + assert child.parent == root + assert child.prob == 0.8 + assert child in root.children + + grandchild = DecisionTreeNode("grandchild", config, parent=child, prob=0.6) + + assert grandchild.parent == child + assert grandchild.prob == (0.8 * 0.6) + assert (grandchild not in root.children) and (grandchild in child.children) + + with pytest.raises(ValueError, match="Probability must be a value 
in the range"): + great_grandchild = DecisionTreeNode( + "greatgrandchild", config, parent=grandchild, prob=2.0 + ) + + with pytest.raises(ValueError, match="Probability must be a value in the range"): + great_grandchild = DecisionTreeNode( + "greatgrandchild", config, parent=grandchild, prob=-0.3 + ) + + +def test_decision_tree_probabilities() -> None: + scenarios = 1 + blocks = [TimeBlock(1, [0])] + config = InterDecisionTimeScenarioConfig(blocks, scenarios) + network = Network("network_id") + + """ + root (p = 1) + |- l_child (p = 0.7) + | |- ll_child (p = 0.5) + | | `- lll_child (p = 1) + | | + | `- lr_child (p = 0.5) + | + `- r_child (p = 0.3) + |- rl_child (p = 0.4) + `- rr_child (p = 0.5) + """ + + # Root + root = DecisionTreeNode("root", config, network) + + # 1st level + l_child = DecisionTreeNode("l_child", config, parent=root, prob=0.7) + r_child = DecisionTreeNode("r_child", config, parent=root, prob=0.3) + + # 2nd level + ll_child = DecisionTreeNode("ll_child", config, parent=l_child, prob=0.5) + lr_child = DecisionTreeNode("lr_child", config, parent=l_child, prob=0.5) + + rl_child = DecisionTreeNode("rl_child", config, parent=r_child, prob=0.4) + rr_child = DecisionTreeNode("rr_child", config, parent=r_child, prob=0.5) + + # 3rd level + lll_child = DecisionTreeNode("lll_child", config, parent=ll_child, prob=1) + + assert ll_child.is_leaves_prob_sum_one() # One child with p = 1 + + assert l_child.is_leaves_prob_sum_one() # Two children w/ p1 = p2 = 0.5 + + assert not r_child.is_leaves_prob_sum_one() # Two children w/ p1 + p2 != 1 + + assert not root.is_leaves_prob_sum_one() diff --git a/tests/unittests/test_model.py b/tests/unittests/test_model.py index 225e0451..b5efe767 100644 --- a/tests/unittests/test_model.py +++ b/tests/unittests/test_model.py @@ -42,8 +42,8 @@ ( "my_constraint", 2 * var("my_var"), - None, - None, + literal(-float("inf")), + literal(float("inf")), "my_constraint", 2 * var("my_var"), literal(-float("inf")), @@ -52,8 +52,8 @@ ( 
"my_constraint", 2 * var("my_var") <= param("p"), - None, - None, + literal(-float("inf")), + literal(float("inf")), "my_constraint", 2 * var("my_var") - param("p"), literal(-float("inf")), @@ -62,8 +62,8 @@ ( "my_constraint", 2 * var("my_var") >= param("p"), - None, - None, + literal(-float("inf")), + literal(float("inf")), "my_constraint", 2 * var("my_var") - param("p"), literal(0), @@ -72,8 +72,8 @@ ( "my_constraint", 2 * var("my_var") == param("p"), - None, - None, + literal(-float("inf")), + literal(float("inf")), "my_constraint", 2 * var("my_var") - param("p"), literal(0), @@ -82,8 +82,8 @@ ( "my_constraint", 2 * var("my_var").expec() == param("p"), - None, - None, + literal(-float("inf")), + literal(float("inf")), "my_constraint", 2 * var("my_var").expec() - param("p"), literal(0), @@ -92,8 +92,8 @@ ( "my_constraint", 2 * var("my_var").shift(-1) == param("p"), - None, - None, + literal(-float("inf")), + literal(float("inf")), "my_constraint", 2 * var("my_var").shift(-1) - param("p"), literal(0), From cf7282a4abac3a41d7da7ad00eb2641d04f42b0b Mon Sep 17 00:00:00 2001 From: Ian Menezes Date: Wed, 24 Jul 2024 12:54:43 +0200 Subject: [PATCH 10/12] Squashed merged commit from 'main' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 7fa89213801af1f57e340bcc40b0dac1eb6c4af7 Merge: 964297a 0074e94 Author: Ian Menezes Date: Wed Jul 24 11:43:38 2024 +0200 Merge branch 'main' into feature/investment_pathway commit 0074e9474276b99f09bbb026cf3fa2a240b73602 Author: ianmnz Date: Wed Jul 24 10:18:58 2024 +0200 Add scenario benders test, bounds validation, and support for bool variables (#46) * Cherry-picked solver var name prefixes and suffixes * Added scenario tests for benders decomposed * Added BoolVar type * Added check for upper-lower variable bounds * Added index struct check for variable naming * Added check for inverted variable bounds commit a7fab1b9764ac923a130cddf85d5bcd6facf2e79 Author: Yann-Temudjin 
<160221141+Yann-Temudjin@users.noreply.github.com> Date: Mon Jun 17 17:02:47 2024 +0200 Short term storage complex (#27) * test Quota CO2 * reformatage du fichier test_quota_co2 * Ajout de Short term storage complex dans les modeles standard * Ajout du test electrolyzer n inputs * fix taking in account the comments on the pull request * Ajout du isclose et modification des tests pour les electrolyzer n_inputs * Fixed test 3 and 4 of the electrolyzer_n_inputs * Model SHORT_TERM_STORAGE_COMPLEX avec le même test que le SIMPLE * fixed test short term storage * fixed generate data for short term storage complex * fixed import order * Formatting change --------- Co-authored-by: Thomas Bittar commit 12ccfd792fe91c83ef9713022027d9b9e16e60cd Author: Louis Chouane <116257827+ChouaneLouis@users.noreply.github.com> Date: Mon Jun 3 10:56:05 2024 +0200 Re-implement functional tests in yaml + multiple fixes (#35) Solved multiple issues: - parsing of shift expressions was wrong - integer variables were not correctly transferred to solver - improved some error messages at parsing time Co-authored-by: vargastat commit 346d852a790106b5370f66bf452529ce313f99e1 Merge: 1c1bbd2 52b5b62 Author: ianmnz Date: Fri May 17 14:08:37 2024 +0200 Merge pull request #34 from AntaresSimulatorTeam/test_performance_large_ast Performance test for deep AST commit 52b5b62eac1f69794a6861f242d95e6dcc19f4d5 Merge: dc4940d 1c1bbd2 Author: Ian Menezes Date: Fri May 17 13:46:24 2024 +0200 Merge branch 'main' into test_performance_large_ast commit dc4940d80ac61eba4b016a5f9ca1d08dd77d3d83 Author: Ian Menezes Date: Fri May 17 13:21:28 2024 +0200 Add RecursionError raise check for tests commit f85d56ebba96805eb0918432a305914ab4f012ec Author: Ian Menezes Date: Thu May 16 16:43:10 2024 +0200 Remove unused generate_random_data function commit ead9b993d6f2bb021fafbeb07256a2d3706157ee Author: Ian Menezes Date: Thu May 16 16:19:38 2024 +0200 Performance test: check large nb of port connections commit 
1c1bbd23f6878f927a6da43dfaa3d1465a93d1b7 Author: vargastat <130074062+vargastat@users.noreply.github.com> Date: Mon May 13 13:59:08 2024 +0200 Yaml format for components and TSV format for timeseries (#37) commit acc77e81e9bd4c6329ebce362400509554afa1f6 Author: Ian Menezes Date: Fri Apr 26 11:43:55 2024 +0200 Performance test: check deep AST using sum() operator commit 12218803cce00d6ed44b2a2337d3976f61004182 Merge: 9f419cd ecd3e1d Author: Florian Omnès Date: Tue Apr 23 17:36:48 2024 +0200 Merge pull request #32 from AntaresSimulatorTeam/feature/upgrade-ortools Upgrade ortools in requirements commit ecd3e1d36206858e4fc8613b5c400cc7f4549285 Author: Thomas Bittar Date: Tue Apr 23 17:28:38 2024 +0200 Remove unnecessary requirements commit be47c07da20f3aa2f2c14fca0b46fa55eae8e323 Author: Thomas Bittar Date: Tue Apr 23 17:14:10 2024 +0200 Upgrade ortools in requirements commit 9f419cd10358f1526c1006ed543bb04160cab69e Author: Juliette-Gerbaux <130555142+Juliette-Gerbaux@users.noreply.github.com> Date: Fri Apr 19 09:46:19 2024 +0200 Models/add new model (#30) * Add variant for electrolyzer subject to renewable * Add model hydro valley --------- Co-authored-by: Juliette-Gerbaux commit fb547dd8f17c6f16e0e8d28a58937c403cadd687 Author: Yann-Temudjin <160221141+Yann-Temudjin@users.noreply.github.com> Date: Fri Apr 19 09:43:20 2024 +0200 Model test yaml (#29) * test electrolyzer_n_inputs with yaml models * quota C02 test with yaml model * print to test the path * fixed path for the first n_input_electrolyzer_yaml test * fix of libs path and move of the tests to models * factorisation, removal of unused import and move of test files * reformat test * move tests of model to tests/model * Fix formatting * re-added libs_dir to model/conftest.py * added type declaration and removed unused argument --------- Co-authored-by: Thomas Bittar commit 7c7d2d5d30693fdf65c4491ef05fb4f43f5f9af9 Author: Sylvain Leclerc Date: Fri Mar 29 17:17:58 2024 +0100 Test case for AC link and PST models 
(#2) Addition of an ac.yml library, and associated test cases. Signed-off-by: Sylvain Leclerc commit f9f9b3a91599f6b44c41debc3fb669804717465d Author: Yann-Temudjin <160221141+Yann-Temudjin@users.noreply.github.com> Date: Fri Mar 22 15:34:36 2024 +0100 test Quota CO2 (#17) * test Quota CO2 * reformatage du fichier test_quota_co2 * Ajout de Short term storage complex dans les modeles standard * Ajout du test electrolyzer n inputs * fix taking in account the comments on the pull request * Ajout du isclose et modification des tests pour les electrolyzer n_inputs * Fixed test 3 and 4 of the electrolyzer_n_inputs commit 94f13b1f4c8f40460134eeec2883feb05ddbe231 Author: Sylvain Leclerc Date: Wed Mar 20 17:24:33 2024 +0100 Model library format parsing (#11) First implementation of library format parsing, including expressions parsing. Implementation notes: - ANTLR4 is used for expression parsing - pydantic together with PyYaml are used for yaml parsing - An additional layer of "unresolved" objects, that are the 1-to-1 image of the input data, is introduced. Once everything is read, we can resolve all objects to actually use them for computation. Note: - Last additions for investment studies have not yet been included ("context" and separation between 2 objectives). 
--------- Signed-off-by: Sylvain Leclerc Signed-off-by: vargastat Co-authored-by: vargastat commit c4b0abd42d8c3b66d0a11942fc74edf1483632ff Author: Juliette-Gerbaux <130555142+Juliette-Gerbaux@users.noreply.github.com> Date: Tue Mar 19 11:34:37 2024 +0100 Add flow-based models (#23) * Add flow based models * Add new model for flow based with net positions * Delete useless model * Correct errors * Rename nodes to real countries to clarify differences between models * Small corrections --------- Co-authored-by: Juliette-Gerbaux commit dfc88718a5923222c9428211e7f17993e9e758b0 Author: Juliette-Gerbaux <130555142+Juliette-Gerbaux@users.noreply.github.com> Date: Tue Mar 19 11:34:16 2024 +0100 Add models representing binding constrains (#25) * Correct error in stock model * Add model constraining total imports * Add nuclear modulation model * Add model with minimum electric vehicle load * Add hvdc model * Delete models because more efficient models are available with ports * Small corrections on names and temporal sums * Change model to take into account total import on a node * Small corrections --------- Co-authored-by: Juliette-Gerbaux commit 2e2f8bb1738db45cf326d83179a6d20cacec6810 Author: tbittar Date: Fri Mar 15 16:40:17 2024 +0100 Correct electrolyzer model typos (#26) * Fix sign mistakes * Fix sign and arrow direction --------- Co-authored-by: Thomas Bittar --- AUTHORS.txt | 1 + grammar/Expr.g4 | 78 + grammar/README.md | 17 + grammar/generate-parser.sh | 5 + .../model_library/stock_final_level.drawio | 2 +- models-design/systems/EV_load_min.drawio | 394 +++ models-design/systems/bc_total_import.drawio | 229 ++ .../systems/electrolyzer_n_inputs.drawio | 103 +- .../electrolyzer_subject_to_renewable.drawio | 298 ++- models-design/systems/flow_based.drawio | 1117 ++++++++ models-design/systems/hydro_valley.drawio | 301 +++ .../systems/nuclear_modulation.drawio | 223 ++ mypy.ini | 8 +- requirements-dev.txt | 4 + requirements.txt | 8 +- 
src/andromede/expression/equality.py | 29 + src/andromede/expression/expression.py | 1 + .../expression/indexing_structure.py | 12 + src/andromede/expression/parsing/__init__.py | 0 .../expression/parsing/antlr/Expr.interp | 49 + .../expression/parsing/antlr/Expr.tokens | 28 + .../expression/parsing/antlr/ExprLexer.interp | 68 + .../expression/parsing/antlr/ExprLexer.py | 1025 +++++++ .../expression/parsing/antlr/ExprLexer.tokens | 28 + .../expression/parsing/antlr/ExprParser.py | 2354 +++++++++++++++++ .../expression/parsing/antlr/ExprVisitor.py | 106 + .../expression/parsing/parse_expression.py | 259 ++ src/andromede/expression/time_operator.py | 2 +- src/andromede/libs/standard.py | 86 +- src/andromede/libs/standard_sc.py | 291 ++ src/andromede/libs/standard_sc.yml | 210 ++ src/andromede/model/common.py | 2 +- src/andromede/model/constraint.py | 14 + src/andromede/model/library.py | 31 + src/andromede/model/parsing.py | 102 + src/andromede/model/resolve_library.py | 155 ++ src/andromede/model/variable.py | 22 +- src/andromede/simulation/optimization.py | 73 +- src/andromede/study/data.py | 21 +- src/andromede/study/parsing.py | 55 + src/andromede/study/resolve_components.py | 170 ++ tests/functional/conftest.py | 33 + tests/functional/libs/lib.yml | 188 ++ tests/functional/test_andromede.py | 330 +-- tests/functional/test_andromede_yml.py | 483 ++++ tests/functional/test_performance.py | 213 +- tests/functional/test_stochastic.py | 20 +- tests/functional/test_xpansion.py | 8 +- tests/integration/test_benders_decomposed.py | 203 +- tests/models/conftest.py | 50 + tests/models/libs/ac.yml | 149 ++ tests/models/test_ac_link.py | 300 +++ tests/models/test_electrolyzer_n_inputs.py | 413 +++ .../models/test_electrolyzer_n_inputs_yaml.py | 448 ++++ tests/models/test_quota_co2.py | 94 + tests/models/test_quota_co2_yaml.py | 99 + .../models/test_short_term_storage_complex.py | 204 ++ tests/unittests/conftest.py | 19 + tests/unittests/data/components.yml | 46 + 
.../components_for_short_term_storage.yml | 83 + tests/unittests/data/demand-ts.txt | 10 + tests/unittests/data/gen-costs.txt | 2 + tests/unittests/data/lib.yml | 183 ++ .../data/model_port_definition_ko.yml | 30 + .../data/model_port_definition_ok.yml | 30 + .../unittests/expressions/parsing/__init__.py | 0 .../parsing/test_expression_parsing.py | 184 ++ tests/unittests/model/__init__.py | 0 tests/unittests/model/test_model_parsing.py | 169 ++ tests/unittests/study/__init__.py | 0 .../study/test_components_parsing.py | 143 + tests/unittests/test_data.py | 69 +- tests/unittests/test_model.py | 12 +- tests/unittests/test_utils.py | 22 +- 74 files changed, 11711 insertions(+), 507 deletions(-) create mode 100644 grammar/Expr.g4 create mode 100644 grammar/README.md create mode 100755 grammar/generate-parser.sh create mode 100644 models-design/systems/EV_load_min.drawio create mode 100644 models-design/systems/bc_total_import.drawio create mode 100644 models-design/systems/flow_based.drawio create mode 100644 models-design/systems/hydro_valley.drawio create mode 100644 models-design/systems/nuclear_modulation.drawio create mode 100644 src/andromede/expression/parsing/__init__.py create mode 100644 src/andromede/expression/parsing/antlr/Expr.interp create mode 100644 src/andromede/expression/parsing/antlr/Expr.tokens create mode 100644 src/andromede/expression/parsing/antlr/ExprLexer.interp create mode 100644 src/andromede/expression/parsing/antlr/ExprLexer.py create mode 100644 src/andromede/expression/parsing/antlr/ExprLexer.tokens create mode 100644 src/andromede/expression/parsing/antlr/ExprParser.py create mode 100644 src/andromede/expression/parsing/antlr/ExprVisitor.py create mode 100644 src/andromede/expression/parsing/parse_expression.py create mode 100644 src/andromede/libs/standard_sc.py create mode 100644 src/andromede/libs/standard_sc.yml create mode 100644 src/andromede/model/library.py create mode 100644 src/andromede/model/parsing.py create mode 100644 
src/andromede/model/resolve_library.py create mode 100644 src/andromede/study/parsing.py create mode 100644 src/andromede/study/resolve_components.py create mode 100644 tests/functional/conftest.py create mode 100644 tests/functional/libs/lib.yml create mode 100644 tests/functional/test_andromede_yml.py create mode 100644 tests/models/conftest.py create mode 100644 tests/models/libs/ac.yml create mode 100644 tests/models/test_ac_link.py create mode 100644 tests/models/test_electrolyzer_n_inputs.py create mode 100644 tests/models/test_electrolyzer_n_inputs_yaml.py create mode 100644 tests/models/test_quota_co2.py create mode 100644 tests/models/test_quota_co2_yaml.py create mode 100644 tests/models/test_short_term_storage_complex.py create mode 100644 tests/unittests/conftest.py create mode 100644 tests/unittests/data/components.yml create mode 100644 tests/unittests/data/components_for_short_term_storage.yml create mode 100644 tests/unittests/data/demand-ts.txt create mode 100644 tests/unittests/data/gen-costs.txt create mode 100644 tests/unittests/data/lib.yml create mode 100644 tests/unittests/data/model_port_definition_ko.yml create mode 100644 tests/unittests/data/model_port_definition_ok.yml create mode 100644 tests/unittests/expressions/parsing/__init__.py create mode 100644 tests/unittests/expressions/parsing/test_expression_parsing.py create mode 100644 tests/unittests/model/__init__.py create mode 100644 tests/unittests/model/test_model_parsing.py create mode 100644 tests/unittests/study/__init__.py create mode 100644 tests/unittests/study/test_components_parsing.py diff --git a/AUTHORS.txt b/AUTHORS.txt index 4ddcc5b2..30c2bacf 100644 --- a/AUTHORS.txt +++ b/AUTHORS.txt @@ -8,3 +8,4 @@ pet-mit sylvlecl tbittar vargastat +Yann-Temudjin \ No newline at end of file diff --git a/grammar/Expr.g4 b/grammar/Expr.g4 new file mode 100644 index 00000000..072bf52e --- /dev/null +++ b/grammar/Expr.g4 @@ -0,0 +1,78 @@ +/* +Copyright (c) 2024, RTE 
(https://www.rte-france.com) + +See AUTHORS.txt + +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. If a copy of the MPL was not distributed with this +file, You can obtain one at http://mozilla.org/MPL/2.0/. + +SPDX-License-Identifier: MPL-2.0 + +This file is part of the Antares project. +*/ + +grammar Expr; + +/* To match the whole input */ +fullexpr: expr EOF; + +expr + : atom # unsignedAtom + | IDENTIFIER '.' IDENTIFIER # portField + | '-' expr # negation + | '(' expr ')' # expression + | expr op=('/' | '*') expr # muldiv + | expr op=('+' | '-') expr # addsub + | expr COMPARISON expr # comparison + | IDENTIFIER '(' expr ')' # function + | IDENTIFIER '[' shift (',' shift)* ']' # timeShift + | IDENTIFIER '[' expr (',' expr )* ']' # timeIndex + | IDENTIFIER '[' shift1=shift '..' shift2=shift ']' # timeShiftRange + | IDENTIFIER '[' expr '..' expr ']' # timeRange + ; + +atom + : NUMBER # number + | IDENTIFIER # identifier + ; + +// a shift is required to be either "t" or "t + ..." or "t - ..." +// Note: simply defining it as "shift: TIME ('+' | '-') expr" won't work +// because the minus sign will not have the expected precedence: +// "t - d + 1" would be equivalent to "t - (d + 1)" +shift: TIME shift_expr?; + +// Because the shift MUST start with + or -, we need +// to differentiate it from generic "expr". +// A shift expression can only be extended to the right by a +// "right_expr" which cannot start with a + or -, +// unlike shift_expr itself. +// TODO: the grammar is still a little weird, because we +// allow more things in the "expr" parts of those +// shift expressions than on their left-most part +// (port fields, nested time shifts and so on). 
+shift_expr + : shift_expr op=('*' | '/') right_expr # shiftMuldiv + | shift_expr op=('+' | '-') right_expr # shiftAddsub + | op=('+' | '-') atom # signedAtom + | op=('+' | '-') '(' expr ')' # signedExpression + ; + +right_expr + : right_expr op=('/' | '*') right_expr # rightMuldiv + | '(' expr ')' # rightExpression + | atom # rightAtom + ; + + +fragment DIGIT : [0-9] ; +fragment CHAR : [a-zA-Z_]; +fragment CHAR_OR_DIGIT : (CHAR | DIGIT); + +NUMBER : DIGIT+ ('.' DIGIT+)?; +TIME : 't'; +IDENTIFIER : CHAR CHAR_OR_DIGIT*; +COMPARISON : ( '=' | '>=' | '<=' ); + +WS: (' ' | '\t' | '\r'| '\n') -> skip; diff --git a/grammar/README.md b/grammar/README.md new file mode 100644 index 00000000..5d51d0b5 --- /dev/null +++ b/grammar/README.md @@ -0,0 +1,17 @@ +# Expression grammar definition + +[Expr.g4](Expr.g4) defines the grammar for mathematical expressions +use for defining constraints, objective, etc. + +[ANTLR](https://www.antlr.org) needs to be used to generate the associated +parser code, which must be written to [andromede.expression.parsing.antlr](/src/andromede/expression/parsing/antlr) +package. **No other files are expected to be present in that package**. + +To achieve this you may use the provided `generate-parser.sh` script after having installed +antlr4-tools (`pip install -r requirements-dev.txt` in root directory). + +You may also, for example, use the ANTLR4 PyCharm plugin. + +We use the visitor and not the listener in order to translate ANTLR AST +into our own AST, so the options `-visitor` and `-no-listener` need to +to be used. 
diff --git a/grammar/generate-parser.sh b/grammar/generate-parser.sh new file mode 100755 index 00000000..e8adc5bf --- /dev/null +++ b/grammar/generate-parser.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +script_file=$(readlink -f -- "$0") +script_dir=$(dirname -- "${script_file}") +antlr4 -Dlanguage=Python3 -Werror -no-listener -visitor -o ${script_dir}/../src/andromede/expression/parsing/antlr Expr.g4 diff --git a/models-design/model_library/stock_final_level.drawio b/models-design/model_library/stock_final_level.drawio index 64f5ee3d..66d118a2 100644 --- a/models-design/model_library/stock_final_level.drawio +++ b/models-design/model_library/stock_final_level.drawio @@ -34,7 +34,7 @@ - + diff --git a/models-design/systems/EV_load_min.drawio b/models-design/systems/EV_load_min.drawio new file mode 100644 index 00000000..c6df1303 --- /dev/null +++ b/models-design/systems/EV_load_min.drawio @@ -0,0 +1,394 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/models-design/systems/bc_total_import.drawio b/models-design/systems/bc_total_import.drawio new file mode 100644 index 00000000..a132ce42 --- /dev/null +++ b/models-design/systems/bc_total_import.drawio @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/models-design/systems/electrolyzer_n_inputs.drawio b/models-design/systems/electrolyzer_n_inputs.drawio index 3c6dbc20..22120590 100644 --- a/models-design/systems/electrolyzer_n_inputs.drawio +++ b/models-design/systems/electrolyzer_n_inputs.drawio @@ -1,6 +1,6 @@ - + @@ -112,7 +112,7 @@ - + @@ -124,7 +124,7 @@ - + @@ -148,7 +148,7 @@ - + @@ -401,7 +401,7 @@ - + @@ -411,19 +411,22 @@ - - + + - + - + - + + + + @@ -542,23 +545,26 @@ - + - - + + - + - + + + + @@ -566,7 +572,7 @@ - + @@ -664,14 +670,8 @@ - - - - - - - + @@ -703,30 +703,39 @@ + + + + + + - + - + - + - - + + - + - + - + + + + @@ -845,23 +854,26 @@ - + - - + + - + - + + + + @@ -871,7 +883,7 @@ - + @@ -908,28 +920,19 @@ - + - + - + - - - - - - - - - - + diff --git a/models-design/systems/electrolyzer_subject_to_renewable.drawio b/models-design/systems/electrolyzer_subject_to_renewable.drawio index 0a40779f..1d9160de 100644 --- a/models-design/systems/electrolyzer_subject_to_renewable.drawio +++ b/models-design/systems/electrolyzer_subject_to_renewable.drawio @@ -1,6 +1,6 @@ - + @@ -430,7 +430,7 @@ - + @@ -764,7 +764,7 @@ - + @@ -1145,4 +1145,296 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/models-design/systems/flow_based.drawio b/models-design/systems/flow_based.drawio new file mode 100644 index 00000000..e4350728 --- /dev/null +++ b/models-design/systems/flow_based.drawio @@ -0,0 +1,1117 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/models-design/systems/hydro_valley.drawio b/models-design/systems/hydro_valley.drawio new file mode 100644 index 00000000..f0d1ded7 --- /dev/null +++ b/models-design/systems/hydro_valley.drawio @@ -0,0 +1,301 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/models-design/systems/nuclear_modulation.drawio b/models-design/systems/nuclear_modulation.drawio new file mode 100644 index 00000000..131cb569 --- /dev/null +++ b/models-design/systems/nuclear_modulation.drawio @@ -0,0 +1,223 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 9027a095..2af1b0b5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,4 +8,10 @@ disallow_untyped_calls = true ignore_missing_imports = true [mypy-anytree.*] -ignore_missing_imports = true \ No newline at end of file +ignore_missing_imports = true + +[mypy-andromede.expression.parsing.antlr.*] +ignore_errors = True + +[mypy-antlr4.*] +ignore_missing_imports = true diff --git a/requirements-dev.txt b/requirements-dev.txt index 4af42e0e..647280ff 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -4,3 +4,7 @@ black~=23.7.0 isort~=5.12.0 pytest-cov pre-commit~=3.5.0 +types-PyYAML~=6.0.12.12 +antlr4-tools~=0.2.1 +pandas~=2.0.3 +pandas-stubs<=2.0.3 diff --git a/requirements.txt b/requirements.txt index badd6e69..062461cf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ -absl-py==1.4.0 numpy==1.24.4 -ortools==9.6.2534 -protobuf==4.23.3 +ortools==9.9.3963 scipy==1.10.1 anytree==2.12.1 - +antlr4-python3-runtime==4.13.1 +PyYAML~=6.0.1 +pydantic~=2.6.1 diff --git a/src/andromede/expression/equality.py b/src/andromede/expression/equality.py index 7da54b5a..a2deeb27 100644 --- a/src/andromede/expression/equality.py +++ b/src/andromede/expression/equality.py @@ -30,6 +30,8 @@ BinaryOperatorNode, ExpressionRange, InstancesTimeIndex, + PortFieldAggregatorNode, + PortFieldNode, ScenarioOperatorNode, TimeAggregatorNode, TimeOperatorNode, @@ -84,6 +86,12 @@ def visit(self, left: ExpressionNode, right: ExpressionNode) -> bool: right, ScenarioOperatorNode ): return self.scenario_operator(left, right) + if isinstance(left, PortFieldNode) and isinstance(right, PortFieldNode): + return self.port_field(left, right) + if isinstance(left, PortFieldAggregatorNode) and isinstance( 
+ right, PortFieldAggregatorNode + ): + return self.port_field_aggregator(left, right) raise NotImplementedError(f"Equality not implemented for {left.__class__}") def literal(self, left: LiteralNode, right: LiteralNode) -> bool: @@ -163,6 +171,16 @@ def scenario_operator( ) -> bool: return left.name == right.name and self.visit(left.operand, right.operand) + def port_field(self, left: PortFieldNode, right: PortFieldNode) -> bool: + return left.port_name == right.port_name and left.field_name == right.field_name + + def port_field_aggregator( + self, left: PortFieldAggregatorNode, right: PortFieldAggregatorNode + ) -> bool: + return left.aggregator == right.aggregator and self.visit( + left.operand, right.operand + ) + def expressions_equal( left: ExpressionNode, right: ExpressionNode, abs_tol: float = 0, rel_tol: float = 0 @@ -171,3 +189,14 @@ def expressions_equal( True if both expression nodes are equal. Literal values may be compared with absolute or relative tolerance. """ return EqualityVisitor(abs_tol, rel_tol).visit(left, right) + + +def expressions_equal_if_present( + lhs: Optional[ExpressionNode], rhs: Optional[ExpressionNode] +) -> bool: + if lhs is None and rhs is None: + return True + elif lhs is None or rhs is None: + return False + else: + return expressions_equal(lhs, rhs) diff --git a/src/andromede/expression/expression.py b/src/andromede/expression/expression.py index 7262326d..5b962caa 100644 --- a/src/andromede/expression/expression.py +++ b/src/andromede/expression/expression.py @@ -344,6 +344,7 @@ def expression_range( ) +@dataclass class InstancesTimeIndex: """ Defines a set of time indices on which a time operator operates. 
diff --git a/src/andromede/expression/indexing_structure.py b/src/andromede/expression/indexing_structure.py index e3edc0f5..746b07ff 100644 --- a/src/andromede/expression/indexing_structure.py +++ b/src/andromede/expression/indexing_structure.py @@ -26,3 +26,15 @@ def __or__(self, other: "IndexingStructure") -> "IndexingStructure": time = self.time or other.time scenario = self.scenario or other.scenario return IndexingStructure(time, scenario) + + def is_time_varying(self) -> bool: + return self.time + + def is_scenario_varying(self) -> bool: + return self.scenario + + def is_time_scenario_varying(self) -> bool: + return self.is_time_varying() and self.is_scenario_varying() + + def is_constant(self) -> bool: + return (not self.is_time_varying()) and (not self.is_scenario_varying()) diff --git a/src/andromede/expression/parsing/__init__.py b/src/andromede/expression/parsing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/andromede/expression/parsing/antlr/Expr.interp b/src/andromede/expression/parsing/antlr/Expr.interp new file mode 100644 index 00000000..bf05ae28 --- /dev/null +++ b/src/andromede/expression/parsing/antlr/Expr.interp @@ -0,0 +1,49 @@ +token literal names: +null +'.' +'-' +'(' +')' +'/' +'*' +'+' +'[' +',' +']' +'..' 
+null +'t' +null +null +null + +token symbolic names: +null +null +null +null +null +null +null +null +null +null +null +null +NUMBER +TIME +IDENTIFIER +COMPARISON +WS + +rule names: +fullexpr +expr +atom +shift +shift_expr +right_expr + + +atn: +[4, 1, 16, 131, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 37, 8, 1, 10, 1, 12, 1, 40, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 49, 8, 1, 10, 1, 12, 1, 52, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 70, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 81, 8, 1, 10, 1, 12, 1, 84, 9, 1, 1, 2, 1, 2, 3, 2, 88, 8, 2, 1, 3, 1, 3, 3, 3, 92, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 3, 4, 102, 8, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 110, 8, 4, 10, 4, 12, 4, 113, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 121, 8, 5, 1, 5, 1, 5, 1, 5, 5, 5, 126, 8, 5, 10, 5, 12, 5, 129, 9, 5, 1, 5, 0, 3, 2, 8, 10, 6, 0, 2, 4, 6, 8, 10, 0, 2, 1, 0, 5, 6, 2, 0, 2, 2, 7, 7, 144, 0, 12, 1, 0, 0, 0, 2, 69, 1, 0, 0, 0, 4, 87, 1, 0, 0, 0, 6, 89, 1, 0, 0, 0, 8, 101, 1, 0, 0, 0, 10, 120, 1, 0, 0, 0, 12, 13, 3, 2, 1, 0, 13, 14, 5, 0, 0, 1, 14, 1, 1, 0, 0, 0, 15, 16, 6, 1, -1, 0, 16, 70, 3, 4, 2, 0, 17, 18, 5, 14, 0, 0, 18, 19, 5, 1, 0, 0, 19, 70, 5, 14, 0, 0, 20, 21, 5, 2, 0, 0, 21, 70, 3, 2, 1, 10, 22, 23, 5, 3, 0, 0, 23, 24, 3, 2, 1, 0, 24, 25, 5, 4, 0, 0, 25, 70, 1, 0, 0, 0, 26, 27, 5, 14, 0, 0, 27, 28, 5, 3, 0, 0, 28, 29, 3, 2, 1, 0, 29, 30, 5, 4, 0, 0, 30, 70, 1, 0, 0, 0, 31, 32, 5, 14, 0, 0, 32, 33, 5, 8, 0, 0, 33, 38, 3, 6, 3, 0, 34, 35, 5, 9, 0, 0, 35, 37, 3, 6, 3, 0, 36, 34, 1, 0, 0, 0, 37, 40, 1, 0, 0, 0, 38, 36, 1, 0, 0, 0, 38, 39, 1, 0, 0, 0, 39, 41, 1, 0, 0, 0, 40, 38, 1, 0, 0, 0, 41, 42, 5, 10, 0, 0, 42, 70, 1, 0, 0, 0, 43, 44, 5, 14, 0, 0, 44, 45, 5, 
8, 0, 0, 45, 50, 3, 2, 1, 0, 46, 47, 5, 9, 0, 0, 47, 49, 3, 2, 1, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 53, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 5, 10, 0, 0, 54, 70, 1, 0, 0, 0, 55, 56, 5, 14, 0, 0, 56, 57, 5, 8, 0, 0, 57, 58, 3, 6, 3, 0, 58, 59, 5, 11, 0, 0, 59, 60, 3, 6, 3, 0, 60, 61, 5, 10, 0, 0, 61, 70, 1, 0, 0, 0, 62, 63, 5, 14, 0, 0, 63, 64, 5, 8, 0, 0, 64, 65, 3, 2, 1, 0, 65, 66, 5, 11, 0, 0, 66, 67, 3, 2, 1, 0, 67, 68, 5, 10, 0, 0, 68, 70, 1, 0, 0, 0, 69, 15, 1, 0, 0, 0, 69, 17, 1, 0, 0, 0, 69, 20, 1, 0, 0, 0, 69, 22, 1, 0, 0, 0, 69, 26, 1, 0, 0, 0, 69, 31, 1, 0, 0, 0, 69, 43, 1, 0, 0, 0, 69, 55, 1, 0, 0, 0, 69, 62, 1, 0, 0, 0, 70, 82, 1, 0, 0, 0, 71, 72, 10, 8, 0, 0, 72, 73, 7, 0, 0, 0, 73, 81, 3, 2, 1, 9, 74, 75, 10, 7, 0, 0, 75, 76, 7, 1, 0, 0, 76, 81, 3, 2, 1, 8, 77, 78, 10, 6, 0, 0, 78, 79, 5, 15, 0, 0, 79, 81, 3, 2, 1, 7, 80, 71, 1, 0, 0, 0, 80, 74, 1, 0, 0, 0, 80, 77, 1, 0, 0, 0, 81, 84, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83, 1, 0, 0, 0, 83, 3, 1, 0, 0, 0, 84, 82, 1, 0, 0, 0, 85, 88, 5, 12, 0, 0, 86, 88, 5, 14, 0, 0, 87, 85, 1, 0, 0, 0, 87, 86, 1, 0, 0, 0, 88, 5, 1, 0, 0, 0, 89, 91, 5, 13, 0, 0, 90, 92, 3, 8, 4, 0, 91, 90, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 7, 1, 0, 0, 0, 93, 94, 6, 4, -1, 0, 94, 95, 7, 1, 0, 0, 95, 102, 3, 4, 2, 0, 96, 97, 7, 1, 0, 0, 97, 98, 5, 3, 0, 0, 98, 99, 3, 2, 1, 0, 99, 100, 5, 4, 0, 0, 100, 102, 1, 0, 0, 0, 101, 93, 1, 0, 0, 0, 101, 96, 1, 0, 0, 0, 102, 111, 1, 0, 0, 0, 103, 104, 10, 4, 0, 0, 104, 105, 7, 0, 0, 0, 105, 110, 3, 10, 5, 0, 106, 107, 10, 3, 0, 0, 107, 108, 7, 1, 0, 0, 108, 110, 3, 10, 5, 0, 109, 103, 1, 0, 0, 0, 109, 106, 1, 0, 0, 0, 110, 113, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112, 1, 0, 0, 0, 112, 9, 1, 0, 0, 0, 113, 111, 1, 0, 0, 0, 114, 115, 6, 5, -1, 0, 115, 116, 5, 3, 0, 0, 116, 117, 3, 2, 1, 0, 117, 118, 5, 4, 0, 0, 118, 121, 1, 0, 0, 0, 119, 121, 3, 4, 2, 0, 120, 114, 1, 0, 0, 0, 120, 119, 1, 0, 0, 0, 121, 127, 1, 0, 0, 0, 122, 123, 10, 3, 0, 0, 
123, 124, 7, 0, 0, 0, 124, 126, 3, 10, 5, 4, 125, 122, 1, 0, 0, 0, 126, 129, 1, 0, 0, 0, 127, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 11, 1, 0, 0, 0, 129, 127, 1, 0, 0, 0, 12, 38, 50, 69, 80, 82, 87, 91, 101, 109, 111, 120, 127] \ No newline at end of file diff --git a/src/andromede/expression/parsing/antlr/Expr.tokens b/src/andromede/expression/parsing/antlr/Expr.tokens new file mode 100644 index 00000000..9401c83a --- /dev/null +++ b/src/andromede/expression/parsing/antlr/Expr.tokens @@ -0,0 +1,28 @@ +T__0=1 +T__1=2 +T__2=3 +T__3=4 +T__4=5 +T__5=6 +T__6=7 +T__7=8 +T__8=9 +T__9=10 +T__10=11 +NUMBER=12 +TIME=13 +IDENTIFIER=14 +COMPARISON=15 +WS=16 +'.'=1 +'-'=2 +'('=3 +')'=4 +'/'=5 +'*'=6 +'+'=7 +'['=8 +','=9 +']'=10 +'..'=11 +'t'=13 diff --git a/src/andromede/expression/parsing/antlr/ExprLexer.interp b/src/andromede/expression/parsing/antlr/ExprLexer.interp new file mode 100644 index 00000000..2e85e1b7 --- /dev/null +++ b/src/andromede/expression/parsing/antlr/ExprLexer.interp @@ -0,0 +1,68 @@ +token literal names: +null +'.' +'-' +'(' +')' +'/' +'*' +'+' +'[' +',' +']' +'..' 
+null +'t' +null +null +null + +token symbolic names: +null +null +null +null +null +null +null +null +null +null +null +null +NUMBER +TIME +IDENTIFIER +COMPARISON +WS + +rule names: +T__0 +T__1 +T__2 +T__3 +T__4 +T__5 +T__6 +T__7 +T__8 +T__9 +T__10 +DIGIT +CHAR +CHAR_OR_DIGIT +NUMBER +TIME +IDENTIFIER +COMPARISON +WS + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[4, 0, 16, 103, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 3, 13, 69, 8, 13, 1, 14, 4, 14, 72, 8, 14, 11, 14, 12, 14, 73, 1, 14, 1, 14, 4, 14, 78, 8, 14, 11, 14, 12, 14, 79, 3, 14, 82, 8, 14, 1, 15, 1, 15, 1, 16, 1, 16, 5, 16, 88, 8, 16, 10, 16, 12, 16, 91, 9, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 98, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 0, 0, 19, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 0, 25, 0, 27, 0, 29, 12, 31, 13, 33, 14, 35, 15, 37, 16, 1, 0, 3, 1, 0, 48, 57, 3, 0, 65, 90, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 106, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 1, 39, 1, 0, 0, 0, 3, 41, 1, 0, 0, 0, 5, 43, 1, 0, 0, 0, 7, 45, 1, 0, 0, 0, 9, 47, 1, 0, 0, 0, 11, 49, 1, 0, 0, 0, 13, 51, 1, 0, 0, 0, 15, 53, 1, 0, 0, 0, 17, 55, 1, 0, 0, 0, 19, 57, 1, 0, 0, 0, 21, 59, 1, 0, 0, 0, 23, 62, 1, 0, 0, 0, 25, 64, 1, 0, 0, 0, 27, 68, 1, 0, 0, 0, 29, 71, 1, 0, 0, 0, 31, 83, 1, 0, 0, 0, 33, 
85, 1, 0, 0, 0, 35, 97, 1, 0, 0, 0, 37, 99, 1, 0, 0, 0, 39, 40, 5, 46, 0, 0, 40, 2, 1, 0, 0, 0, 41, 42, 5, 45, 0, 0, 42, 4, 1, 0, 0, 0, 43, 44, 5, 40, 0, 0, 44, 6, 1, 0, 0, 0, 45, 46, 5, 41, 0, 0, 46, 8, 1, 0, 0, 0, 47, 48, 5, 47, 0, 0, 48, 10, 1, 0, 0, 0, 49, 50, 5, 42, 0, 0, 50, 12, 1, 0, 0, 0, 51, 52, 5, 43, 0, 0, 52, 14, 1, 0, 0, 0, 53, 54, 5, 91, 0, 0, 54, 16, 1, 0, 0, 0, 55, 56, 5, 44, 0, 0, 56, 18, 1, 0, 0, 0, 57, 58, 5, 93, 0, 0, 58, 20, 1, 0, 0, 0, 59, 60, 5, 46, 0, 0, 60, 61, 5, 46, 0, 0, 61, 22, 1, 0, 0, 0, 62, 63, 7, 0, 0, 0, 63, 24, 1, 0, 0, 0, 64, 65, 7, 1, 0, 0, 65, 26, 1, 0, 0, 0, 66, 69, 3, 25, 12, 0, 67, 69, 3, 23, 11, 0, 68, 66, 1, 0, 0, 0, 68, 67, 1, 0, 0, 0, 69, 28, 1, 0, 0, 0, 70, 72, 3, 23, 11, 0, 71, 70, 1, 0, 0, 0, 72, 73, 1, 0, 0, 0, 73, 71, 1, 0, 0, 0, 73, 74, 1, 0, 0, 0, 74, 81, 1, 0, 0, 0, 75, 77, 5, 46, 0, 0, 76, 78, 3, 23, 11, 0, 77, 76, 1, 0, 0, 0, 78, 79, 1, 0, 0, 0, 79, 77, 1, 0, 0, 0, 79, 80, 1, 0, 0, 0, 80, 82, 1, 0, 0, 0, 81, 75, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 30, 1, 0, 0, 0, 83, 84, 5, 116, 0, 0, 84, 32, 1, 0, 0, 0, 85, 89, 3, 25, 12, 0, 86, 88, 3, 27, 13, 0, 87, 86, 1, 0, 0, 0, 88, 91, 1, 0, 0, 0, 89, 87, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 34, 1, 0, 0, 0, 91, 89, 1, 0, 0, 0, 92, 98, 5, 61, 0, 0, 93, 94, 5, 62, 0, 0, 94, 98, 5, 61, 0, 0, 95, 96, 5, 60, 0, 0, 96, 98, 5, 61, 0, 0, 97, 92, 1, 0, 0, 0, 97, 93, 1, 0, 0, 0, 97, 95, 1, 0, 0, 0, 98, 36, 1, 0, 0, 0, 99, 100, 7, 2, 0, 0, 100, 101, 1, 0, 0, 0, 101, 102, 6, 18, 0, 0, 102, 38, 1, 0, 0, 0, 7, 0, 68, 73, 79, 81, 89, 97, 1, 6, 0, 0] \ No newline at end of file diff --git a/src/andromede/expression/parsing/antlr/ExprLexer.py b/src/andromede/expression/parsing/antlr/ExprLexer.py new file mode 100644 index 00000000..1ad7f368 --- /dev/null +++ b/src/andromede/expression/parsing/antlr/ExprLexer.py @@ -0,0 +1,1025 @@ +# Generated from Expr.g4 by ANTLR 4.13.1 +import sys +from io import StringIO + +from antlr4 import * + +if sys.version_info[1] > 5: + from typing import TextIO 
+else: + from typing.io import TextIO + + +def serializedATN(): + return [ + 4, + 0, + 16, + 103, + 6, + -1, + 2, + 0, + 7, + 0, + 2, + 1, + 7, + 1, + 2, + 2, + 7, + 2, + 2, + 3, + 7, + 3, + 2, + 4, + 7, + 4, + 2, + 5, + 7, + 5, + 2, + 6, + 7, + 6, + 2, + 7, + 7, + 7, + 2, + 8, + 7, + 8, + 2, + 9, + 7, + 9, + 2, + 10, + 7, + 10, + 2, + 11, + 7, + 11, + 2, + 12, + 7, + 12, + 2, + 13, + 7, + 13, + 2, + 14, + 7, + 14, + 2, + 15, + 7, + 15, + 2, + 16, + 7, + 16, + 2, + 17, + 7, + 17, + 2, + 18, + 7, + 18, + 1, + 0, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 2, + 1, + 2, + 1, + 3, + 1, + 3, + 1, + 4, + 1, + 4, + 1, + 5, + 1, + 5, + 1, + 6, + 1, + 6, + 1, + 7, + 1, + 7, + 1, + 8, + 1, + 8, + 1, + 9, + 1, + 9, + 1, + 10, + 1, + 10, + 1, + 10, + 1, + 11, + 1, + 11, + 1, + 12, + 1, + 12, + 1, + 13, + 1, + 13, + 3, + 13, + 69, + 8, + 13, + 1, + 14, + 4, + 14, + 72, + 8, + 14, + 11, + 14, + 12, + 14, + 73, + 1, + 14, + 1, + 14, + 4, + 14, + 78, + 8, + 14, + 11, + 14, + 12, + 14, + 79, + 3, + 14, + 82, + 8, + 14, + 1, + 15, + 1, + 15, + 1, + 16, + 1, + 16, + 5, + 16, + 88, + 8, + 16, + 10, + 16, + 12, + 16, + 91, + 9, + 16, + 1, + 17, + 1, + 17, + 1, + 17, + 1, + 17, + 1, + 17, + 3, + 17, + 98, + 8, + 17, + 1, + 18, + 1, + 18, + 1, + 18, + 1, + 18, + 0, + 0, + 19, + 1, + 1, + 3, + 2, + 5, + 3, + 7, + 4, + 9, + 5, + 11, + 6, + 13, + 7, + 15, + 8, + 17, + 9, + 19, + 10, + 21, + 11, + 23, + 0, + 25, + 0, + 27, + 0, + 29, + 12, + 31, + 13, + 33, + 14, + 35, + 15, + 37, + 16, + 1, + 0, + 3, + 1, + 0, + 48, + 57, + 3, + 0, + 65, + 90, + 95, + 95, + 97, + 122, + 3, + 0, + 9, + 10, + 13, + 13, + 32, + 32, + 106, + 0, + 1, + 1, + 0, + 0, + 0, + 0, + 3, + 1, + 0, + 0, + 0, + 0, + 5, + 1, + 0, + 0, + 0, + 0, + 7, + 1, + 0, + 0, + 0, + 0, + 9, + 1, + 0, + 0, + 0, + 0, + 11, + 1, + 0, + 0, + 0, + 0, + 13, + 1, + 0, + 0, + 0, + 0, + 15, + 1, + 0, + 0, + 0, + 0, + 17, + 1, + 0, + 0, + 0, + 0, + 19, + 1, + 0, + 0, + 0, + 0, + 21, + 1, + 0, + 0, + 0, + 0, + 29, + 1, + 0, + 0, + 0, + 0, + 31, + 1, + 
0, + 0, + 0, + 0, + 33, + 1, + 0, + 0, + 0, + 0, + 35, + 1, + 0, + 0, + 0, + 0, + 37, + 1, + 0, + 0, + 0, + 1, + 39, + 1, + 0, + 0, + 0, + 3, + 41, + 1, + 0, + 0, + 0, + 5, + 43, + 1, + 0, + 0, + 0, + 7, + 45, + 1, + 0, + 0, + 0, + 9, + 47, + 1, + 0, + 0, + 0, + 11, + 49, + 1, + 0, + 0, + 0, + 13, + 51, + 1, + 0, + 0, + 0, + 15, + 53, + 1, + 0, + 0, + 0, + 17, + 55, + 1, + 0, + 0, + 0, + 19, + 57, + 1, + 0, + 0, + 0, + 21, + 59, + 1, + 0, + 0, + 0, + 23, + 62, + 1, + 0, + 0, + 0, + 25, + 64, + 1, + 0, + 0, + 0, + 27, + 68, + 1, + 0, + 0, + 0, + 29, + 71, + 1, + 0, + 0, + 0, + 31, + 83, + 1, + 0, + 0, + 0, + 33, + 85, + 1, + 0, + 0, + 0, + 35, + 97, + 1, + 0, + 0, + 0, + 37, + 99, + 1, + 0, + 0, + 0, + 39, + 40, + 5, + 46, + 0, + 0, + 40, + 2, + 1, + 0, + 0, + 0, + 41, + 42, + 5, + 45, + 0, + 0, + 42, + 4, + 1, + 0, + 0, + 0, + 43, + 44, + 5, + 40, + 0, + 0, + 44, + 6, + 1, + 0, + 0, + 0, + 45, + 46, + 5, + 41, + 0, + 0, + 46, + 8, + 1, + 0, + 0, + 0, + 47, + 48, + 5, + 47, + 0, + 0, + 48, + 10, + 1, + 0, + 0, + 0, + 49, + 50, + 5, + 42, + 0, + 0, + 50, + 12, + 1, + 0, + 0, + 0, + 51, + 52, + 5, + 43, + 0, + 0, + 52, + 14, + 1, + 0, + 0, + 0, + 53, + 54, + 5, + 91, + 0, + 0, + 54, + 16, + 1, + 0, + 0, + 0, + 55, + 56, + 5, + 44, + 0, + 0, + 56, + 18, + 1, + 0, + 0, + 0, + 57, + 58, + 5, + 93, + 0, + 0, + 58, + 20, + 1, + 0, + 0, + 0, + 59, + 60, + 5, + 46, + 0, + 0, + 60, + 61, + 5, + 46, + 0, + 0, + 61, + 22, + 1, + 0, + 0, + 0, + 62, + 63, + 7, + 0, + 0, + 0, + 63, + 24, + 1, + 0, + 0, + 0, + 64, + 65, + 7, + 1, + 0, + 0, + 65, + 26, + 1, + 0, + 0, + 0, + 66, + 69, + 3, + 25, + 12, + 0, + 67, + 69, + 3, + 23, + 11, + 0, + 68, + 66, + 1, + 0, + 0, + 0, + 68, + 67, + 1, + 0, + 0, + 0, + 69, + 28, + 1, + 0, + 0, + 0, + 70, + 72, + 3, + 23, + 11, + 0, + 71, + 70, + 1, + 0, + 0, + 0, + 72, + 73, + 1, + 0, + 0, + 0, + 73, + 71, + 1, + 0, + 0, + 0, + 73, + 74, + 1, + 0, + 0, + 0, + 74, + 81, + 1, + 0, + 0, + 0, + 75, + 77, + 5, + 46, + 0, + 0, + 76, + 78, + 3, + 23, + 
11, + 0, + 77, + 76, + 1, + 0, + 0, + 0, + 78, + 79, + 1, + 0, + 0, + 0, + 79, + 77, + 1, + 0, + 0, + 0, + 79, + 80, + 1, + 0, + 0, + 0, + 80, + 82, + 1, + 0, + 0, + 0, + 81, + 75, + 1, + 0, + 0, + 0, + 81, + 82, + 1, + 0, + 0, + 0, + 82, + 30, + 1, + 0, + 0, + 0, + 83, + 84, + 5, + 116, + 0, + 0, + 84, + 32, + 1, + 0, + 0, + 0, + 85, + 89, + 3, + 25, + 12, + 0, + 86, + 88, + 3, + 27, + 13, + 0, + 87, + 86, + 1, + 0, + 0, + 0, + 88, + 91, + 1, + 0, + 0, + 0, + 89, + 87, + 1, + 0, + 0, + 0, + 89, + 90, + 1, + 0, + 0, + 0, + 90, + 34, + 1, + 0, + 0, + 0, + 91, + 89, + 1, + 0, + 0, + 0, + 92, + 98, + 5, + 61, + 0, + 0, + 93, + 94, + 5, + 62, + 0, + 0, + 94, + 98, + 5, + 61, + 0, + 0, + 95, + 96, + 5, + 60, + 0, + 0, + 96, + 98, + 5, + 61, + 0, + 0, + 97, + 92, + 1, + 0, + 0, + 0, + 97, + 93, + 1, + 0, + 0, + 0, + 97, + 95, + 1, + 0, + 0, + 0, + 98, + 36, + 1, + 0, + 0, + 0, + 99, + 100, + 7, + 2, + 0, + 0, + 100, + 101, + 1, + 0, + 0, + 0, + 101, + 102, + 6, + 18, + 0, + 0, + 102, + 38, + 1, + 0, + 0, + 0, + 7, + 0, + 68, + 73, + 79, + 81, + 89, + 97, + 1, + 6, + 0, + 0, + ] + + +class ExprLexer(Lexer): + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)] + + T__0 = 1 + T__1 = 2 + T__2 = 3 + T__3 = 4 + T__4 = 5 + T__5 = 6 + T__6 = 7 + T__7 = 8 + T__8 = 9 + T__9 = 10 + T__10 = 11 + NUMBER = 12 + TIME = 13 + IDENTIFIER = 14 + COMPARISON = 15 + WS = 16 + + channelNames = ["DEFAULT_TOKEN_CHANNEL", "HIDDEN"] + + modeNames = ["DEFAULT_MODE"] + + literalNames = [ + "", + "'.'", + "'-'", + "'('", + "')'", + "'/'", + "'*'", + "'+'", + "'['", + "','", + "']'", + "'..'", + "'t'", + ] + + symbolicNames = ["", "NUMBER", "TIME", "IDENTIFIER", "COMPARISON", "WS"] + + ruleNames = [ + "T__0", + "T__1", + "T__2", + "T__3", + "T__4", + "T__5", + "T__6", + "T__7", + "T__8", + "T__9", + "T__10", + "DIGIT", + "CHAR", + "CHAR_OR_DIGIT", + "NUMBER", + "TIME", + "IDENTIFIER", + "COMPARISON", + "WS", + ] + + 
grammarFileName = "Expr.g4" + + def __init__(self, input=None, output: TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.13.1") + self._interp = LexerATNSimulator( + self, self.atn, self.decisionsToDFA, PredictionContextCache() + ) + self._actions = None + self._predicates = None diff --git a/src/andromede/expression/parsing/antlr/ExprLexer.tokens b/src/andromede/expression/parsing/antlr/ExprLexer.tokens new file mode 100644 index 00000000..9401c83a --- /dev/null +++ b/src/andromede/expression/parsing/antlr/ExprLexer.tokens @@ -0,0 +1,28 @@ +T__0=1 +T__1=2 +T__2=3 +T__3=4 +T__4=5 +T__5=6 +T__6=7 +T__7=8 +T__8=9 +T__9=10 +T__10=11 +NUMBER=12 +TIME=13 +IDENTIFIER=14 +COMPARISON=15 +WS=16 +'.'=1 +'-'=2 +'('=3 +')'=4 +'/'=5 +'*'=6 +'+'=7 +'['=8 +','=9 +']'=10 +'..'=11 +'t'=13 diff --git a/src/andromede/expression/parsing/antlr/ExprParser.py b/src/andromede/expression/parsing/antlr/ExprParser.py new file mode 100644 index 00000000..8f312fe9 --- /dev/null +++ b/src/andromede/expression/parsing/antlr/ExprParser.py @@ -0,0 +1,2354 @@ +# Generated from Expr.g4 by ANTLR 4.13.1 +# encoding: utf-8 +import sys +from io import StringIO + +from antlr4 import * + +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + +def serializedATN(): + return [ + 4, + 1, + 16, + 131, + 2, + 0, + 7, + 0, + 2, + 1, + 7, + 1, + 2, + 2, + 7, + 2, + 2, + 3, + 7, + 3, + 2, + 4, + 7, + 4, + 2, + 5, + 7, + 5, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 5, + 1, + 37, + 8, + 1, + 10, + 1, + 12, + 1, + 40, + 9, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 5, + 1, + 49, + 8, + 1, + 10, + 1, + 12, + 1, + 52, + 9, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 
1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 3, + 1, + 70, + 8, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 5, + 1, + 81, + 8, + 1, + 10, + 1, + 12, + 1, + 84, + 9, + 1, + 1, + 2, + 1, + 2, + 3, + 2, + 88, + 8, + 2, + 1, + 3, + 1, + 3, + 3, + 3, + 92, + 8, + 3, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 3, + 4, + 102, + 8, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 1, + 4, + 5, + 4, + 110, + 8, + 4, + 10, + 4, + 12, + 4, + 113, + 9, + 4, + 1, + 5, + 1, + 5, + 1, + 5, + 1, + 5, + 1, + 5, + 1, + 5, + 3, + 5, + 121, + 8, + 5, + 1, + 5, + 1, + 5, + 1, + 5, + 5, + 5, + 126, + 8, + 5, + 10, + 5, + 12, + 5, + 129, + 9, + 5, + 1, + 5, + 0, + 3, + 2, + 8, + 10, + 6, + 0, + 2, + 4, + 6, + 8, + 10, + 0, + 2, + 1, + 0, + 5, + 6, + 2, + 0, + 2, + 2, + 7, + 7, + 144, + 0, + 12, + 1, + 0, + 0, + 0, + 2, + 69, + 1, + 0, + 0, + 0, + 4, + 87, + 1, + 0, + 0, + 0, + 6, + 89, + 1, + 0, + 0, + 0, + 8, + 101, + 1, + 0, + 0, + 0, + 10, + 120, + 1, + 0, + 0, + 0, + 12, + 13, + 3, + 2, + 1, + 0, + 13, + 14, + 5, + 0, + 0, + 1, + 14, + 1, + 1, + 0, + 0, + 0, + 15, + 16, + 6, + 1, + -1, + 0, + 16, + 70, + 3, + 4, + 2, + 0, + 17, + 18, + 5, + 14, + 0, + 0, + 18, + 19, + 5, + 1, + 0, + 0, + 19, + 70, + 5, + 14, + 0, + 0, + 20, + 21, + 5, + 2, + 0, + 0, + 21, + 70, + 3, + 2, + 1, + 10, + 22, + 23, + 5, + 3, + 0, + 0, + 23, + 24, + 3, + 2, + 1, + 0, + 24, + 25, + 5, + 4, + 0, + 0, + 25, + 70, + 1, + 0, + 0, + 0, + 26, + 27, + 5, + 14, + 0, + 0, + 27, + 28, + 5, + 3, + 0, + 0, + 28, + 29, + 3, + 2, + 1, + 0, + 29, + 30, + 5, + 4, + 0, + 0, + 30, + 70, + 1, + 0, + 0, + 0, + 31, + 32, + 5, + 14, + 0, + 0, + 32, + 33, + 5, + 8, + 0, + 0, + 33, + 38, + 3, + 6, + 3, + 0, + 34, + 35, + 5, + 9, + 0, + 0, + 35, + 37, + 3, + 6, + 3, + 0, + 36, + 34, + 1, + 0, + 0, + 0, + 37, + 40, + 1, + 0, + 0, + 0, + 38, + 36, + 1, + 0, + 0, + 0, + 38, + 39, + 1, + 0, + 0, + 0, + 39, + 41, + 
1, + 0, + 0, + 0, + 40, + 38, + 1, + 0, + 0, + 0, + 41, + 42, + 5, + 10, + 0, + 0, + 42, + 70, + 1, + 0, + 0, + 0, + 43, + 44, + 5, + 14, + 0, + 0, + 44, + 45, + 5, + 8, + 0, + 0, + 45, + 50, + 3, + 2, + 1, + 0, + 46, + 47, + 5, + 9, + 0, + 0, + 47, + 49, + 3, + 2, + 1, + 0, + 48, + 46, + 1, + 0, + 0, + 0, + 49, + 52, + 1, + 0, + 0, + 0, + 50, + 48, + 1, + 0, + 0, + 0, + 50, + 51, + 1, + 0, + 0, + 0, + 51, + 53, + 1, + 0, + 0, + 0, + 52, + 50, + 1, + 0, + 0, + 0, + 53, + 54, + 5, + 10, + 0, + 0, + 54, + 70, + 1, + 0, + 0, + 0, + 55, + 56, + 5, + 14, + 0, + 0, + 56, + 57, + 5, + 8, + 0, + 0, + 57, + 58, + 3, + 6, + 3, + 0, + 58, + 59, + 5, + 11, + 0, + 0, + 59, + 60, + 3, + 6, + 3, + 0, + 60, + 61, + 5, + 10, + 0, + 0, + 61, + 70, + 1, + 0, + 0, + 0, + 62, + 63, + 5, + 14, + 0, + 0, + 63, + 64, + 5, + 8, + 0, + 0, + 64, + 65, + 3, + 2, + 1, + 0, + 65, + 66, + 5, + 11, + 0, + 0, + 66, + 67, + 3, + 2, + 1, + 0, + 67, + 68, + 5, + 10, + 0, + 0, + 68, + 70, + 1, + 0, + 0, + 0, + 69, + 15, + 1, + 0, + 0, + 0, + 69, + 17, + 1, + 0, + 0, + 0, + 69, + 20, + 1, + 0, + 0, + 0, + 69, + 22, + 1, + 0, + 0, + 0, + 69, + 26, + 1, + 0, + 0, + 0, + 69, + 31, + 1, + 0, + 0, + 0, + 69, + 43, + 1, + 0, + 0, + 0, + 69, + 55, + 1, + 0, + 0, + 0, + 69, + 62, + 1, + 0, + 0, + 0, + 70, + 82, + 1, + 0, + 0, + 0, + 71, + 72, + 10, + 8, + 0, + 0, + 72, + 73, + 7, + 0, + 0, + 0, + 73, + 81, + 3, + 2, + 1, + 9, + 74, + 75, + 10, + 7, + 0, + 0, + 75, + 76, + 7, + 1, + 0, + 0, + 76, + 81, + 3, + 2, + 1, + 8, + 77, + 78, + 10, + 6, + 0, + 0, + 78, + 79, + 5, + 15, + 0, + 0, + 79, + 81, + 3, + 2, + 1, + 7, + 80, + 71, + 1, + 0, + 0, + 0, + 80, + 74, + 1, + 0, + 0, + 0, + 80, + 77, + 1, + 0, + 0, + 0, + 81, + 84, + 1, + 0, + 0, + 0, + 82, + 80, + 1, + 0, + 0, + 0, + 82, + 83, + 1, + 0, + 0, + 0, + 83, + 3, + 1, + 0, + 0, + 0, + 84, + 82, + 1, + 0, + 0, + 0, + 85, + 88, + 5, + 12, + 0, + 0, + 86, + 88, + 5, + 14, + 0, + 0, + 87, + 85, + 1, + 0, + 0, + 0, + 87, + 86, + 1, + 0, + 0, + 0, + 88, + 5, + 1, 
+ 0, + 0, + 0, + 89, + 91, + 5, + 13, + 0, + 0, + 90, + 92, + 3, + 8, + 4, + 0, + 91, + 90, + 1, + 0, + 0, + 0, + 91, + 92, + 1, + 0, + 0, + 0, + 92, + 7, + 1, + 0, + 0, + 0, + 93, + 94, + 6, + 4, + -1, + 0, + 94, + 95, + 7, + 1, + 0, + 0, + 95, + 102, + 3, + 4, + 2, + 0, + 96, + 97, + 7, + 1, + 0, + 0, + 97, + 98, + 5, + 3, + 0, + 0, + 98, + 99, + 3, + 2, + 1, + 0, + 99, + 100, + 5, + 4, + 0, + 0, + 100, + 102, + 1, + 0, + 0, + 0, + 101, + 93, + 1, + 0, + 0, + 0, + 101, + 96, + 1, + 0, + 0, + 0, + 102, + 111, + 1, + 0, + 0, + 0, + 103, + 104, + 10, + 4, + 0, + 0, + 104, + 105, + 7, + 0, + 0, + 0, + 105, + 110, + 3, + 10, + 5, + 0, + 106, + 107, + 10, + 3, + 0, + 0, + 107, + 108, + 7, + 1, + 0, + 0, + 108, + 110, + 3, + 10, + 5, + 0, + 109, + 103, + 1, + 0, + 0, + 0, + 109, + 106, + 1, + 0, + 0, + 0, + 110, + 113, + 1, + 0, + 0, + 0, + 111, + 109, + 1, + 0, + 0, + 0, + 111, + 112, + 1, + 0, + 0, + 0, + 112, + 9, + 1, + 0, + 0, + 0, + 113, + 111, + 1, + 0, + 0, + 0, + 114, + 115, + 6, + 5, + -1, + 0, + 115, + 116, + 5, + 3, + 0, + 0, + 116, + 117, + 3, + 2, + 1, + 0, + 117, + 118, + 5, + 4, + 0, + 0, + 118, + 121, + 1, + 0, + 0, + 0, + 119, + 121, + 3, + 4, + 2, + 0, + 120, + 114, + 1, + 0, + 0, + 0, + 120, + 119, + 1, + 0, + 0, + 0, + 121, + 127, + 1, + 0, + 0, + 0, + 122, + 123, + 10, + 3, + 0, + 0, + 123, + 124, + 7, + 0, + 0, + 0, + 124, + 126, + 3, + 10, + 5, + 4, + 125, + 122, + 1, + 0, + 0, + 0, + 126, + 129, + 1, + 0, + 0, + 0, + 127, + 125, + 1, + 0, + 0, + 0, + 127, + 128, + 1, + 0, + 0, + 0, + 128, + 11, + 1, + 0, + 0, + 0, + 129, + 127, + 1, + 0, + 0, + 0, + 12, + 38, + 50, + 69, + 80, + 82, + 87, + 91, + 101, + 109, + 111, + 120, + 127, + ] + + +class ExprParser(Parser): + grammarFileName = "Expr.g4" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)] + + sharedContextCache = PredictionContextCache() + + literalNames = [ + "", + "'.'", + "'-'", + "'('", + "')'", + "'/'", 
+ "'*'", + "'+'", + "'['", + "','", + "']'", + "'..'", + "", + "'t'", + ] + + symbolicNames = [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NUMBER", + "TIME", + "IDENTIFIER", + "COMPARISON", + "WS", + ] + + RULE_fullexpr = 0 + RULE_expr = 1 + RULE_atom = 2 + RULE_shift = 3 + RULE_shift_expr = 4 + RULE_right_expr = 5 + + ruleNames = ["fullexpr", "expr", "atom", "shift", "shift_expr", "right_expr"] + + EOF = Token.EOF + T__0 = 1 + T__1 = 2 + T__2 = 3 + T__3 = 4 + T__4 = 5 + T__5 = 6 + T__6 = 7 + T__7 = 8 + T__8 = 9 + T__9 = 10 + T__10 = 11 + NUMBER = 12 + TIME = 13 + IDENTIFIER = 14 + COMPARISON = 15 + WS = 16 + + def __init__(self, input: TokenStream, output: TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.13.1") + self._interp = ParserATNSimulator( + self, self.atn, self.decisionsToDFA, self.sharedContextCache + ) + self._predicates = None + + class FullexprContext(ParserRuleContext): + __slots__ = "parser" + + def __init__( + self, parser, parent: ParserRuleContext = None, invokingState: int = -1 + ): + super().__init__(parent, invokingState) + self.parser = parser + + def expr(self): + return self.getTypedRuleContext(ExprParser.ExprContext, 0) + + def EOF(self): + return self.getToken(ExprParser.EOF, 0) + + def getRuleIndex(self): + return ExprParser.RULE_fullexpr + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitFullexpr"): + return visitor.visitFullexpr(self) + else: + return visitor.visitChildren(self) + + def fullexpr(self): + localctx = ExprParser.FullexprContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_fullexpr) + try: + self.enterOuterAlt(localctx, 1) + self.state = 12 + self.expr(0) + self.state = 13 + self.match(ExprParser.EOF) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class 
ExprContext(ParserRuleContext): + __slots__ = "parser" + + def __init__( + self, parser, parent: ParserRuleContext = None, invokingState: int = -1 + ): + super().__init__(parent, invokingState) + self.parser = parser + + def getRuleIndex(self): + return ExprParser.RULE_expr + + def copyFrom(self, ctx: ParserRuleContext): + super().copyFrom(ctx) + + class NegationContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def expr(self): + return self.getTypedRuleContext(ExprParser.ExprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitNegation"): + return visitor.visitNegation(self) + else: + return visitor.visitChildren(self) + + class UnsignedAtomContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def atom(self): + return self.getTypedRuleContext(ExprParser.AtomContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitUnsignedAtom"): + return visitor.visitUnsignedAtom(self) + else: + return visitor.visitChildren(self) + + class ExpressionContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def expr(self): + return self.getTypedRuleContext(ExprParser.ExprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitExpression"): + return visitor.visitExpression(self) + else: + return visitor.visitChildren(self) + + class TimeIndexContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def IDENTIFIER(self): + return self.getToken(ExprParser.IDENTIFIER, 0) + + def expr(self, i: int = None): + if i is None: + return 
self.getTypedRuleContexts(ExprParser.ExprContext) + else: + return self.getTypedRuleContext(ExprParser.ExprContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitTimeIndex"): + return visitor.visitTimeIndex(self) + else: + return visitor.visitChildren(self) + + class ComparisonContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def expr(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.ExprContext) + else: + return self.getTypedRuleContext(ExprParser.ExprContext, i) + + def COMPARISON(self): + return self.getToken(ExprParser.COMPARISON, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitComparison"): + return visitor.visitComparison(self) + else: + return visitor.visitChildren(self) + + class TimeShiftContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def IDENTIFIER(self): + return self.getToken(ExprParser.IDENTIFIER, 0) + + def shift(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.ShiftContext) + else: + return self.getTypedRuleContext(ExprParser.ShiftContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitTimeShift"): + return visitor.visitTimeShift(self) + else: + return visitor.visitChildren(self) + + class FunctionContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def IDENTIFIER(self): + return self.getToken(ExprParser.IDENTIFIER, 0) + + def expr(self): + return self.getTypedRuleContext(ExprParser.ExprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitFunction"): + return visitor.visitFunction(self) + else: + 
return visitor.visitChildren(self) + + class AddsubContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.op = None # Token + self.copyFrom(ctx) + + def expr(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.ExprContext) + else: + return self.getTypedRuleContext(ExprParser.ExprContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitAddsub"): + return visitor.visitAddsub(self) + else: + return visitor.visitChildren(self) + + class TimeShiftRangeContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.shift1 = None # ShiftContext + self.shift2 = None # ShiftContext + self.copyFrom(ctx) + + def IDENTIFIER(self): + return self.getToken(ExprParser.IDENTIFIER, 0) + + def shift(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.ShiftContext) + else: + return self.getTypedRuleContext(ExprParser.ShiftContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitTimeShiftRange"): + return visitor.visitTimeShiftRange(self) + else: + return visitor.visitChildren(self) + + class PortFieldContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def IDENTIFIER(self, i: int = None): + if i is None: + return self.getTokens(ExprParser.IDENTIFIER) + else: + return self.getToken(ExprParser.IDENTIFIER, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitPortField"): + return visitor.visitPortField(self) + else: + return visitor.visitChildren(self) + + class MuldivContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.op = None # Token + 
self.copyFrom(ctx) + + def expr(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.ExprContext) + else: + return self.getTypedRuleContext(ExprParser.ExprContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitMuldiv"): + return visitor.visitMuldiv(self) + else: + return visitor.visitChildren(self) + + class TimeRangeContext(ExprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.ExprContext + super().__init__(parser) + self.copyFrom(ctx) + + def IDENTIFIER(self): + return self.getToken(ExprParser.IDENTIFIER, 0) + + def expr(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.ExprContext) + else: + return self.getTypedRuleContext(ExprParser.ExprContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitTimeRange"): + return visitor.visitTimeRange(self) + else: + return visitor.visitChildren(self) + + def expr(self, _p: int = 0): + _parentctx = self._ctx + _parentState = self.state + localctx = ExprParser.ExprContext(self, self._ctx, _parentState) + _prevctx = localctx + _startState = 2 + self.enterRecursionRule(localctx, 2, self.RULE_expr, _p) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 69 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input, 2, self._ctx) + if la_ == 1: + localctx = ExprParser.UnsignedAtomContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + + self.state = 16 + self.atom() + pass + + elif la_ == 2: + localctx = ExprParser.PortFieldContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 17 + self.match(ExprParser.IDENTIFIER) + self.state = 18 + self.match(ExprParser.T__0) + self.state = 19 + self.match(ExprParser.IDENTIFIER) + pass + + elif la_ == 3: + localctx = ExprParser.NegationContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 20 + 
self.match(ExprParser.T__1) + self.state = 21 + self.expr(10) + pass + + elif la_ == 4: + localctx = ExprParser.ExpressionContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 22 + self.match(ExprParser.T__2) + self.state = 23 + self.expr(0) + self.state = 24 + self.match(ExprParser.T__3) + pass + + elif la_ == 5: + localctx = ExprParser.FunctionContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 26 + self.match(ExprParser.IDENTIFIER) + self.state = 27 + self.match(ExprParser.T__2) + self.state = 28 + self.expr(0) + self.state = 29 + self.match(ExprParser.T__3) + pass + + elif la_ == 6: + localctx = ExprParser.TimeShiftContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 31 + self.match(ExprParser.IDENTIFIER) + self.state = 32 + self.match(ExprParser.T__7) + self.state = 33 + self.shift() + self.state = 38 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la == 9: + self.state = 34 + self.match(ExprParser.T__8) + self.state = 35 + self.shift() + self.state = 40 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 41 + self.match(ExprParser.T__9) + pass + + elif la_ == 7: + localctx = ExprParser.TimeIndexContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 43 + self.match(ExprParser.IDENTIFIER) + self.state = 44 + self.match(ExprParser.T__7) + self.state = 45 + self.expr(0) + self.state = 50 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la == 9: + self.state = 46 + self.match(ExprParser.T__8) + self.state = 47 + self.expr(0) + self.state = 52 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 53 + self.match(ExprParser.T__9) + pass + + elif la_ == 8: + localctx = ExprParser.TimeShiftRangeContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 55 + self.match(ExprParser.IDENTIFIER) + self.state = 56 + self.match(ExprParser.T__7) + self.state = 
57 + localctx.shift1 = self.shift() + self.state = 58 + self.match(ExprParser.T__10) + self.state = 59 + localctx.shift2 = self.shift() + self.state = 60 + self.match(ExprParser.T__9) + pass + + elif la_ == 9: + localctx = ExprParser.TimeRangeContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 62 + self.match(ExprParser.IDENTIFIER) + self.state = 63 + self.match(ExprParser.T__7) + self.state = 64 + self.expr(0) + self.state = 65 + self.match(ExprParser.T__10) + self.state = 66 + self.expr(0) + self.state = 67 + self.match(ExprParser.T__9) + pass + + self._ctx.stop = self._input.LT(-1) + self.state = 82 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input, 4, self._ctx) + while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER: + if _alt == 1: + if self._parseListeners is not None: + self.triggerExitRuleEvent() + _prevctx = localctx + self.state = 80 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input, 3, self._ctx) + if la_ == 1: + localctx = ExprParser.MuldivContext( + self, ExprParser.ExprContext(self, _parentctx, _parentState) + ) + self.pushNewRecursionContext( + localctx, _startState, self.RULE_expr + ) + self.state = 71 + if not self.precpred(self._ctx, 8): + from antlr4.error.Errors import FailedPredicateException + + raise FailedPredicateException( + self, "self.precpred(self._ctx, 8)" + ) + self.state = 72 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not (_la == 5 or _la == 6): + localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 73 + self.expr(9) + pass + + elif la_ == 2: + localctx = ExprParser.AddsubContext( + self, ExprParser.ExprContext(self, _parentctx, _parentState) + ) + self.pushNewRecursionContext( + localctx, _startState, self.RULE_expr + ) + self.state = 74 + if not self.precpred(self._ctx, 7): + from antlr4.error.Errors import FailedPredicateException + + raise 
FailedPredicateException( + self, "self.precpred(self._ctx, 7)" + ) + self.state = 75 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not (_la == 2 or _la == 7): + localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 76 + self.expr(8) + pass + + elif la_ == 3: + localctx = ExprParser.ComparisonContext( + self, ExprParser.ExprContext(self, _parentctx, _parentState) + ) + self.pushNewRecursionContext( + localctx, _startState, self.RULE_expr + ) + self.state = 77 + if not self.precpred(self._ctx, 6): + from antlr4.error.Errors import FailedPredicateException + + raise FailedPredicateException( + self, "self.precpred(self._ctx, 6)" + ) + self.state = 78 + self.match(ExprParser.COMPARISON) + self.state = 79 + self.expr(7) + pass + + self.state = 84 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input, 4, self._ctx) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.unrollRecursionContexts(_parentctx) + return localctx + + class AtomContext(ParserRuleContext): + __slots__ = "parser" + + def __init__( + self, parser, parent: ParserRuleContext = None, invokingState: int = -1 + ): + super().__init__(parent, invokingState) + self.parser = parser + + def getRuleIndex(self): + return ExprParser.RULE_atom + + def copyFrom(self, ctx: ParserRuleContext): + super().copyFrom(ctx) + + class NumberContext(AtomContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.AtomContext + super().__init__(parser) + self.copyFrom(ctx) + + def NUMBER(self): + return self.getToken(ExprParser.NUMBER, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitNumber"): + return visitor.visitNumber(self) + else: + return visitor.visitChildren(self) + + class IdentifierContext(AtomContext): + def __init__( + self, 
parser, ctx: ParserRuleContext + ): # actually a ExprParser.AtomContext + super().__init__(parser) + self.copyFrom(ctx) + + def IDENTIFIER(self): + return self.getToken(ExprParser.IDENTIFIER, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitIdentifier"): + return visitor.visitIdentifier(self) + else: + return visitor.visitChildren(self) + + def atom(self): + localctx = ExprParser.AtomContext(self, self._ctx, self.state) + self.enterRule(localctx, 4, self.RULE_atom) + try: + self.state = 87 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [12]: + localctx = ExprParser.NumberContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 85 + self.match(ExprParser.NUMBER) + pass + elif token in [14]: + localctx = ExprParser.IdentifierContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 86 + self.match(ExprParser.IDENTIFIER) + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ShiftContext(ParserRuleContext): + __slots__ = "parser" + + def __init__( + self, parser, parent: ParserRuleContext = None, invokingState: int = -1 + ): + super().__init__(parent, invokingState) + self.parser = parser + + def TIME(self): + return self.getToken(ExprParser.TIME, 0) + + def shift_expr(self): + return self.getTypedRuleContext(ExprParser.Shift_exprContext, 0) + + def getRuleIndex(self): + return ExprParser.RULE_shift + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitShift"): + return visitor.visitShift(self) + else: + return visitor.visitChildren(self) + + def shift(self): + localctx = ExprParser.ShiftContext(self, self._ctx, self.state) + self.enterRule(localctx, 6, self.RULE_shift) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 89 + 
self.match(ExprParser.TIME) + self.state = 91 + self._errHandler.sync(self) + _la = self._input.LA(1) + if _la == 2 or _la == 7: + self.state = 90 + self.shift_expr(0) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class Shift_exprContext(ParserRuleContext): + __slots__ = "parser" + + def __init__( + self, parser, parent: ParserRuleContext = None, invokingState: int = -1 + ): + super().__init__(parent, invokingState) + self.parser = parser + + def getRuleIndex(self): + return ExprParser.RULE_shift_expr + + def copyFrom(self, ctx: ParserRuleContext): + super().copyFrom(ctx) + + class SignedAtomContext(Shift_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Shift_exprContext + super().__init__(parser) + self.op = None # Token + self.copyFrom(ctx) + + def atom(self): + return self.getTypedRuleContext(ExprParser.AtomContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitSignedAtom"): + return visitor.visitSignedAtom(self) + else: + return visitor.visitChildren(self) + + class SignedExpressionContext(Shift_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Shift_exprContext + super().__init__(parser) + self.op = None # Token + self.copyFrom(ctx) + + def expr(self): + return self.getTypedRuleContext(ExprParser.ExprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitSignedExpression"): + return visitor.visitSignedExpression(self) + else: + return visitor.visitChildren(self) + + class ShiftMuldivContext(Shift_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Shift_exprContext + super().__init__(parser) + self.op = None # Token + self.copyFrom(ctx) + + def shift_expr(self): + return 
self.getTypedRuleContext(ExprParser.Shift_exprContext, 0) + + def right_expr(self): + return self.getTypedRuleContext(ExprParser.Right_exprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitShiftMuldiv"): + return visitor.visitShiftMuldiv(self) + else: + return visitor.visitChildren(self) + + class ShiftAddsubContext(Shift_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Shift_exprContext + super().__init__(parser) + self.op = None # Token + self.copyFrom(ctx) + + def shift_expr(self): + return self.getTypedRuleContext(ExprParser.Shift_exprContext, 0) + + def right_expr(self): + return self.getTypedRuleContext(ExprParser.Right_exprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitShiftAddsub"): + return visitor.visitShiftAddsub(self) + else: + return visitor.visitChildren(self) + + def shift_expr(self, _p: int = 0): + _parentctx = self._ctx + _parentState = self.state + localctx = ExprParser.Shift_exprContext(self, self._ctx, _parentState) + _prevctx = localctx + _startState = 8 + self.enterRecursionRule(localctx, 8, self.RULE_shift_expr, _p) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 101 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input, 7, self._ctx) + if la_ == 1: + localctx = ExprParser.SignedAtomContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + + self.state = 94 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not (_la == 2 or _la == 7): + localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 95 + self.atom() + pass + + elif la_ == 2: + localctx = ExprParser.SignedExpressionContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 96 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not (_la == 2 or _la == 7): + 
localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 97 + self.match(ExprParser.T__2) + self.state = 98 + self.expr(0) + self.state = 99 + self.match(ExprParser.T__3) + pass + + self._ctx.stop = self._input.LT(-1) + self.state = 111 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input, 9, self._ctx) + while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER: + if _alt == 1: + if self._parseListeners is not None: + self.triggerExitRuleEvent() + _prevctx = localctx + self.state = 109 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input, 8, self._ctx) + if la_ == 1: + localctx = ExprParser.ShiftMuldivContext( + self, + ExprParser.Shift_exprContext( + self, _parentctx, _parentState + ), + ) + self.pushNewRecursionContext( + localctx, _startState, self.RULE_shift_expr + ) + self.state = 103 + if not self.precpred(self._ctx, 4): + from antlr4.error.Errors import FailedPredicateException + + raise FailedPredicateException( + self, "self.precpred(self._ctx, 4)" + ) + self.state = 104 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not (_la == 5 or _la == 6): + localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 105 + self.right_expr(0) + pass + + elif la_ == 2: + localctx = ExprParser.ShiftAddsubContext( + self, + ExprParser.Shift_exprContext( + self, _parentctx, _parentState + ), + ) + self.pushNewRecursionContext( + localctx, _startState, self.RULE_shift_expr + ) + self.state = 106 + if not self.precpred(self._ctx, 3): + from antlr4.error.Errors import FailedPredicateException + + raise FailedPredicateException( + self, "self.precpred(self._ctx, 3)" + ) + self.state = 107 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not (_la == 2 or _la == 7): + localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) 
+ self.consume() + self.state = 108 + self.right_expr(0) + pass + + self.state = 113 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input, 9, self._ctx) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.unrollRecursionContexts(_parentctx) + return localctx + + class Right_exprContext(ParserRuleContext): + __slots__ = "parser" + + def __init__( + self, parser, parent: ParserRuleContext = None, invokingState: int = -1 + ): + super().__init__(parent, invokingState) + self.parser = parser + + def getRuleIndex(self): + return ExprParser.RULE_right_expr + + def copyFrom(self, ctx: ParserRuleContext): + super().copyFrom(ctx) + + class RightExpressionContext(Right_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Right_exprContext + super().__init__(parser) + self.copyFrom(ctx) + + def expr(self): + return self.getTypedRuleContext(ExprParser.ExprContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitRightExpression"): + return visitor.visitRightExpression(self) + else: + return visitor.visitChildren(self) + + class RightMuldivContext(Right_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Right_exprContext + super().__init__(parser) + self.op = None # Token + self.copyFrom(ctx) + + def right_expr(self, i: int = None): + if i is None: + return self.getTypedRuleContexts(ExprParser.Right_exprContext) + else: + return self.getTypedRuleContext(ExprParser.Right_exprContext, i) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitRightMuldiv"): + return visitor.visitRightMuldiv(self) + else: + return visitor.visitChildren(self) + + class RightAtomContext(Right_exprContext): + def __init__( + self, parser, ctx: ParserRuleContext + ): # actually a ExprParser.Right_exprContext + 
super().__init__(parser) + self.copyFrom(ctx) + + def atom(self): + return self.getTypedRuleContext(ExprParser.AtomContext, 0) + + def accept(self, visitor: ParseTreeVisitor): + if hasattr(visitor, "visitRightAtom"): + return visitor.visitRightAtom(self) + else: + return visitor.visitChildren(self) + + def right_expr(self, _p: int = 0): + _parentctx = self._ctx + _parentState = self.state + localctx = ExprParser.Right_exprContext(self, self._ctx, _parentState) + _prevctx = localctx + _startState = 10 + self.enterRecursionRule(localctx, 10, self.RULE_right_expr, _p) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 120 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [3]: + localctx = ExprParser.RightExpressionContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + + self.state = 115 + self.match(ExprParser.T__2) + self.state = 116 + self.expr(0) + self.state = 117 + self.match(ExprParser.T__3) + pass + elif token in [12, 14]: + localctx = ExprParser.RightAtomContext(self, localctx) + self._ctx = localctx + _prevctx = localctx + self.state = 119 + self.atom() + pass + else: + raise NoViableAltException(self) + + self._ctx.stop = self._input.LT(-1) + self.state = 127 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input, 11, self._ctx) + while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER: + if _alt == 1: + if self._parseListeners is not None: + self.triggerExitRuleEvent() + _prevctx = localctx + localctx = ExprParser.RightMuldivContext( + self, + ExprParser.Right_exprContext(self, _parentctx, _parentState), + ) + self.pushNewRecursionContext( + localctx, _startState, self.RULE_right_expr + ) + self.state = 122 + if not self.precpred(self._ctx, 3): + from antlr4.error.Errors import FailedPredicateException + + raise FailedPredicateException( + self, "self.precpred(self._ctx, 3)" + ) + self.state = 123 + localctx.op = self._input.LT(1) + _la = self._input.LA(1) + if not 
(_la == 5 or _la == 6): + localctx.op = self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 124 + self.right_expr(4) + self.state = 129 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input, 11, self._ctx) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.unrollRecursionContexts(_parentctx) + return localctx + + def sempred(self, localctx: RuleContext, ruleIndex: int, predIndex: int): + if self._predicates == None: + self._predicates = dict() + self._predicates[1] = self.expr_sempred + self._predicates[4] = self.shift_expr_sempred + self._predicates[5] = self.right_expr_sempred + pred = self._predicates.get(ruleIndex, None) + if pred is None: + raise Exception("No predicate with index:" + str(ruleIndex)) + else: + return pred(localctx, predIndex) + + def expr_sempred(self, localctx: ExprContext, predIndex: int): + if predIndex == 0: + return self.precpred(self._ctx, 8) + + if predIndex == 1: + return self.precpred(self._ctx, 7) + + if predIndex == 2: + return self.precpred(self._ctx, 6) + + def shift_expr_sempred(self, localctx: Shift_exprContext, predIndex: int): + if predIndex == 3: + return self.precpred(self._ctx, 4) + + if predIndex == 4: + return self.precpred(self._ctx, 3) + + def right_expr_sempred(self, localctx: Right_exprContext, predIndex: int): + if predIndex == 5: + return self.precpred(self._ctx, 3) diff --git a/src/andromede/expression/parsing/antlr/ExprVisitor.py b/src/andromede/expression/parsing/antlr/ExprVisitor.py new file mode 100644 index 00000000..0e924349 --- /dev/null +++ b/src/andromede/expression/parsing/antlr/ExprVisitor.py @@ -0,0 +1,106 @@ +# Generated from Expr.g4 by ANTLR 4.13.1 +from antlr4 import * + +if "." 
in __name__: + from .ExprParser import ExprParser +else: + from ExprParser import ExprParser + +# This class defines a complete generic visitor for a parse tree produced by ExprParser. + + +class ExprVisitor(ParseTreeVisitor): + # Visit a parse tree produced by ExprParser#fullexpr. + def visitFullexpr(self, ctx: ExprParser.FullexprContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#negation. + def visitNegation(self, ctx: ExprParser.NegationContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#unsignedAtom. + def visitUnsignedAtom(self, ctx: ExprParser.UnsignedAtomContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#expression. + def visitExpression(self, ctx: ExprParser.ExpressionContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#timeIndex. + def visitTimeIndex(self, ctx: ExprParser.TimeIndexContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#comparison. + def visitComparison(self, ctx: ExprParser.ComparisonContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#timeShift. + def visitTimeShift(self, ctx: ExprParser.TimeShiftContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#function. + def visitFunction(self, ctx: ExprParser.FunctionContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#addsub. + def visitAddsub(self, ctx: ExprParser.AddsubContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#timeShiftRange. + def visitTimeShiftRange(self, ctx: ExprParser.TimeShiftRangeContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#portField. + def visitPortField(self, ctx: ExprParser.PortFieldContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#muldiv. 
+ def visitMuldiv(self, ctx: ExprParser.MuldivContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#timeRange. + def visitTimeRange(self, ctx: ExprParser.TimeRangeContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#number. + def visitNumber(self, ctx: ExprParser.NumberContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#identifier. + def visitIdentifier(self, ctx: ExprParser.IdentifierContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#shift. + def visitShift(self, ctx: ExprParser.ShiftContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#signedAtom. + def visitSignedAtom(self, ctx: ExprParser.SignedAtomContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#signedExpression. + def visitSignedExpression(self, ctx: ExprParser.SignedExpressionContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#shiftMuldiv. + def visitShiftMuldiv(self, ctx: ExprParser.ShiftMuldivContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#shiftAddsub. + def visitShiftAddsub(self, ctx: ExprParser.ShiftAddsubContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#rightExpression. + def visitRightExpression(self, ctx: ExprParser.RightExpressionContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#rightMuldiv. + def visitRightMuldiv(self, ctx: ExprParser.RightMuldivContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by ExprParser#rightAtom. 
+ def visitRightAtom(self, ctx: ExprParser.RightAtomContext): + return self.visitChildren(ctx) + + +del ExprParser diff --git a/src/andromede/expression/parsing/parse_expression.py b/src/andromede/expression/parsing/parse_expression.py new file mode 100644 index 00000000..e96a70f1 --- /dev/null +++ b/src/andromede/expression/parsing/parse_expression.py @@ -0,0 +1,259 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +from dataclasses import dataclass +from typing import Set + +from antlr4 import CommonTokenStream, DiagnosticErrorListener, InputStream +from antlr4.error.ErrorStrategy import BailErrorStrategy + +from andromede.expression import ExpressionNode, literal, param, var +from andromede.expression.equality import expressions_equal +from andromede.expression.expression import ( + Comparator, + ComparisonNode, + ExpressionRange, + PortFieldNode, +) +from andromede.expression.parsing.antlr.ExprLexer import ExprLexer +from andromede.expression.parsing.antlr.ExprParser import ExprParser +from andromede.expression.parsing.antlr.ExprVisitor import ExprVisitor + + +@dataclass(frozen=True) +class ModelIdentifiers: + """ + Allows to distinguish between parameters and variables. + """ + + variables: Set[str] + parameters: Set[str] + + def is_variable(self, identifier: str) -> bool: + return identifier in self.variables + + def is_parameter(self, identifier: str) -> bool: + return identifier in self.parameters + + +@dataclass(frozen=True) +class ExpressionNodeBuilderVisitor(ExprVisitor): + """ + Visits a tree created by ANTLR to create our AST representation. 
+ """ + + identifiers: ModelIdentifiers + + def visitFullexpr(self, ctx: ExprParser.FullexprContext) -> ExpressionNode: + return ctx.expr().accept(self) # type: ignore + + # Visit a parse tree produced by ExprParser#number. + def visitNumber(self, ctx: ExprParser.NumberContext) -> ExpressionNode: + return literal(float(ctx.NUMBER().getText())) # type: ignore + + # Visit a parse tree produced by ExprParser#identifier. + def visitIdentifier(self, ctx: ExprParser.IdentifierContext) -> ExpressionNode: + return self._convert_identifier(ctx.IDENTIFIER().getText()) # type: ignore + + # Visit a parse tree produced by ExprParser#division. + def visitMuldiv(self, ctx: ExprParser.MuldivContext) -> ExpressionNode: + left = ctx.expr(0).accept(self) # type: ignore + right = ctx.expr(1).accept(self) # type: ignore + op = ctx.op.text # type: ignore + if op == "*": + return left * right + elif op == "/": + return left / right + raise ValueError(f"Invalid operator {op}") + + # Visit a parse tree produced by ExprParser#subtraction. + def visitAddsub(self, ctx: ExprParser.AddsubContext) -> ExpressionNode: + left = ctx.expr(0).accept(self) # type: ignore + right = ctx.expr(1).accept(self) # type: ignore + op = ctx.op.text # type: ignore + if op == "+": + return left + right + elif op == "-": + return left - right + raise ValueError(f"Invalid operator {op}") + + # Visit a parse tree produced by ExprParser#negation. + def visitNegation(self, ctx: ExprParser.NegationContext) -> ExpressionNode: + return -ctx.expr().accept(self) # type: ignore + + # Visit a parse tree produced by ExprParser#expression. + def visitExpression(self, ctx: ExprParser.ExpressionContext) -> ExpressionNode: + return ctx.expr().accept(self) # type: ignore + + # Visit a parse tree produced by ExprParser#unsignedAtom. 
+ def visitUnsignedAtom(self, ctx: ExprParser.UnsignedAtomContext) -> ExpressionNode: + return ctx.atom().accept(self) # type: ignore + + def _convert_identifier(self, identifier: str) -> ExpressionNode: + if self.identifiers.is_variable(identifier): + return var(identifier) + elif self.identifiers.is_parameter(identifier): + return param(identifier) + raise ValueError(f"{identifier} is not a valid variable or parameter name.") + + # Visit a parse tree produced by ExprParser#portField. + def visitPortField(self, ctx: ExprParser.PortFieldContext) -> ExpressionNode: + return PortFieldNode( + port_name=ctx.IDENTIFIER(0).getText(), # type: ignore + field_name=ctx.IDENTIFIER(1).getText(), # type: ignore + ) + + # Visit a parse tree produced by ExprParser#comparison. + def visitComparison(self, ctx: ExprParser.ComparisonContext) -> ExpressionNode: + op = ctx.COMPARISON().getText() # type: ignore + exp1 = ctx.expr(0).accept(self) # type: ignore + exp2 = ctx.expr(1).accept(self) # type: ignore + comp = { + "=": Comparator.EQUAL, + "<=": Comparator.LESS_THAN, + ">=": Comparator.GREATER_THAN, + }[op] + return ComparisonNode(exp1, exp2, comp) + + # Visit a parse tree produced by ExprParser#timeIndex. + def visitTimeIndex(self, ctx: ExprParser.TimeIndexContext) -> ExpressionNode: + shifted_expr = self._convert_identifier(ctx.IDENTIFIER().getText()) # type: ignore + time_shifts = [e.accept(self) for e in ctx.expr()] # type: ignore + return shifted_expr.eval(time_shifts) + + # Visit a parse tree produced by ExprParser#timeRange. 
+ def visitTimeRange(self, ctx: ExprParser.TimeRangeContext) -> ExpressionNode: + shifted_expr = self._convert_identifier(ctx.IDENTIFIER().getText()) # type: ignore + expressions = [e.accept(self) for e in ctx.expr()] # type: ignore + return shifted_expr.eval(ExpressionRange(expressions[0], expressions[1])) + + def visitTimeShift(self, ctx: ExprParser.TimeShiftContext) -> ExpressionNode: + shifted_expr = self._convert_identifier(ctx.IDENTIFIER().getText()) # type: ignore + time_shifts = [s.accept(self) for s in ctx.shift()] # type: ignore + # specifics for x[t] ... + if len(time_shifts) == 1 and expressions_equal(time_shifts[0], literal(0)): + return shifted_expr + return shifted_expr.shift(time_shifts) + + def visitTimeShiftRange( + self, ctx: ExprParser.TimeShiftRangeContext + ) -> ExpressionNode: + shifted_expr = self._convert_identifier(ctx.IDENTIFIER().getText()) # type: ignore + shift1 = ctx.shift1.accept(self) # type: ignore + shift2 = ctx.shift2.accept(self) # type: ignore + return shifted_expr.shift(ExpressionRange(shift1, shift2)) + + # Visit a parse tree produced by ExprParser#function. + def visitFunction(self, ctx: ExprParser.FunctionContext) -> ExpressionNode: + function_name: str = ctx.IDENTIFIER().getText() # type: ignore + operand: ExpressionNode = ctx.expr().accept(self) # type: ignore + fn = _FUNCTIONS.get(function_name, None) + if fn is None: + raise ValueError(f"Encountered invalid function name {function_name}") + return fn(operand) + + # Visit a parse tree produced by ExprParser#shift. + def visitShift(self, ctx: ExprParser.ShiftContext) -> ExpressionNode: + if ctx.shift_expr() is None: # type: ignore + return literal(0) + shift = ctx.shift_expr().accept(self) # type: ignore + return shift + + # Visit a parse tree produced by ExprParser#shiftAddsub. 
+ def visitShiftAddsub(self, ctx: ExprParser.ShiftAddsubContext) -> ExpressionNode: + left = ctx.shift_expr().accept(self) # type: ignore + right = ctx.right_expr().accept(self) # type: ignore + op = ctx.op.text # type: ignore + if op == "+": + return left + right + elif op == "-": + return left - right + raise ValueError(f"Invalid operator {op}") + + # Visit a parse tree produced by ExprParser#shiftMuldiv. + def visitShiftMuldiv(self, ctx: ExprParser.ShiftMuldivContext) -> ExpressionNode: + left = ctx.shift_expr().accept(self) # type: ignore + right = ctx.right_expr().accept(self) # type: ignore + op = ctx.op.text # type: ignore + if op == "*": + return left * right + elif op == "/": + return left / right + raise ValueError(f"Invalid operator {op}") + + # Visit a parse tree produced by ExprParser#signedExpression. + def visitSignedExpression( + self, ctx: ExprParser.SignedExpressionContext + ) -> ExpressionNode: + if ctx.op.text == "-": # type: ignore + return -ctx.expr().accept(self) # type: ignore + else: + return ctx.expr().accept(self) # type: ignore + + # Visit a parse tree produced by ExprParser#signedAtom. + def visitSignedAtom(self, ctx: ExprParser.SignedAtomContext) -> ExpressionNode: + if ctx.op.text == "-": # type: ignore + return -ctx.atom().accept(self) # type: ignore + else: + return ctx.atom().accept(self) # type: ignore + + # Visit a parse tree produced by ExprParser#rightExpression. + def visitRightExpression( + self, ctx: ExprParser.RightExpressionContext + ) -> ExpressionNode: + return ctx.expr().accept(self) # type: ignore + + # Visit a parse tree produced by ExprParser#rightMuldiv. 
+ def visitRightMuldiv(self, ctx: ExprParser.RightMuldivContext) -> ExpressionNode: + left = ctx.right_expr(0).accept(self) # type: ignore + right = ctx.right_expr(1).accept(self) # type: ignore + op = ctx.op.text # type: ignore + if op == "*": + return left * right + elif op == "/": + return left / right + raise ValueError(f"Invalid operator {op}") + + # Visit a parse tree produced by ExprParser#rightAtom. + def visitRightAtom(self, ctx: ExprParser.RightAtomContext) -> ExpressionNode: + return ctx.atom().accept(self) # type: ignore + + +_FUNCTIONS = { + "sum": ExpressionNode.sum, + "sum_connections": ExpressionNode.sum_connections, + "expec": ExpressionNode.expec, +} + + +class AntaresParseException(Exception): + pass + + +def parse_expression(expression: str, identifiers: ModelIdentifiers) -> ExpressionNode: + """ + Parses a string expression to create the corresponding AST representation. + """ + try: + input = InputStream(expression) + lexer = ExprLexer(input) + stream = CommonTokenStream(lexer) + parser = ExprParser(stream) + parser._errHandler = BailErrorStrategy() + + return ExpressionNodeBuilderVisitor(identifiers).visit(parser.fullexpr()) # type: ignore + + except ValueError as e: + raise AntaresParseException(f"An error occurred during parsing: {e}") from e + except Exception as e: + raise AntaresParseException( + f"An error occurred during parsing: {type(e).__name__}" + ) from e diff --git a/src/andromede/expression/time_operator.py b/src/andromede/expression/time_operator.py index f8e98374..63059528 100644 --- a/src/andromede/expression/time_operator.py +++ b/src/andromede/expression/time_operator.py @@ -76,7 +76,7 @@ class TimeEvaluation(TimeOperator): Absolute time evalaution of variables Examples: - >>> x.evaluate([1, 2, 4]) represents the vector of variables (x[1], x[2], x[4]) + >>> x.eval([1, 2, 4]) represents the vector of variables (x[1], x[2], x[4]) """ def __str__(self) -> str: diff --git a/src/andromede/libs/standard.py 
b/src/andromede/libs/standard.py index 48e05d7e..0aaebc9a 100644 --- a/src/andromede/libs/standard.py +++ b/src/andromede/libs/standard.py @@ -64,32 +64,6 @@ .sum() .expec(), ) -""" -A standard model for a linear cost generation, limited by a maximum generation. -""" -GENERATOR_MODEL = model( - id="GEN", - parameters=[ - float_parameter("p_max", CONSTANT), - float_parameter("cost", CONSTANT), - ], - variables=[float_variable("generation", lower_bound=literal(0))], - ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], - port_fields_definitions=[ - PortFieldDefinition( - port_field=PortFieldId("balance_port", "flow"), - definition=var("generation"), - ) - ], - constraints=[ - Constraint( - name="Max generation", expression=var("generation") <= param("p_max") - ), - ], - objective_operational_contribution=(param("cost") * var("generation")) - .sum() - .expec(), -) """ Basic link model using ports @@ -133,6 +107,33 @@ ], ) +""" +A standard model for a linear cost generation, limited by a maximum generation. +""" +GENERATOR_MODEL = model( + id="GEN", + parameters=[ + float_parameter("p_max", CONSTANT), + float_parameter("cost", CONSTANT), + ], + variables=[float_variable("generation", lower_bound=literal(0))], + ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("balance_port", "flow"), + definition=var("generation"), + ) + ], + constraints=[ + Constraint( + name="Max generation", expression=var("generation") <= param("p_max") + ), + ], + objective_operational_contribution=(param("cost") * var("generation")) + .sum() + .expec(), +) + GENERATOR_MODEL_WITH_PMIN = model( id="GEN", parameters=[ @@ -163,6 +164,39 @@ .expec(), ) +""" +A model for a linear cost generation limited by a maximum generation per time-step +and total generation in whole period. 
It considers a full storage with no replenishing +""" +GENERATOR_MODEL_WITH_STORAGE = model( + id="GEN", + parameters=[ + float_parameter("p_max", CONSTANT), + float_parameter("cost", CONSTANT), + float_parameter("full_storage", CONSTANT), + ], + variables=[float_variable("generation", lower_bound=literal(0))], + ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("balance_port", "flow"), + definition=var("generation"), + ) + ], + constraints=[ + Constraint( + name="Max generation", expression=var("generation") <= param("p_max") + ), + Constraint( + name="Total storage", + expression=var("generation").sum() <= param("full_storage"), + ), + ], + objective_operational_contribution=(param("cost") * var("generation")) + .sum() + .expec(), +) + # For now, no starting cost THERMAL_CLUSTER_MODEL_HD = model( id="GEN", diff --git a/src/andromede/libs/standard_sc.py b/src/andromede/libs/standard_sc.py new file mode 100644 index 00000000..25200638 --- /dev/null +++ b/src/andromede/libs/standard_sc.py @@ -0,0 +1,291 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. + +from andromede.expression import literal, param, var +from andromede.expression.expression import port_field +from andromede.libs.standard import BALANCE_PORT_TYPE, CONSTANT, TIME_AND_SCENARIO_FREE +from andromede.model import ( + Constraint, + ModelPort, + PortField, + PortType, + float_parameter, + float_variable, + model, +) +from andromede.model.model import PortFieldDefinition, PortFieldId + +""" +Simple Convertor model. 
+""" +CONVERTOR_MODEL = model( + id="Convertor model", + parameters=[float_parameter("alpha")], + variables=[ + float_variable("input", lower_bound=literal(0)), + ], + ports=[ + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDI"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDO"), + ], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("FlowDI", "flow"), + definition=-var("input"), + ), + PortFieldDefinition( + port_field=PortFieldId("FlowDO", "flow"), + definition=var("input") * param("alpha"), + ), + ], +) + +""" +Two inputs Convertor model. +""" +TWO_INPUTS_CONVERTOR_MODEL = model( + id="Convertor model", + parameters=[float_parameter("alpha1"), float_parameter("alpha2")], + variables=[ + float_variable("input1", lower_bound=literal(0)), + float_variable("input2", lower_bound=literal(0)), + ], + ports=[ + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDI1"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDI2"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDO"), + ], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("FlowDI1", "flow"), + definition=-var("input1"), + ), + PortFieldDefinition( + port_field=PortFieldId("FlowDI2", "flow"), + definition=-var("input2"), + ), + PortFieldDefinition( + port_field=PortFieldId("FlowDO", "flow"), + definition=var("input1") * param("alpha1") + + var("input2") * param("alpha2"), + ), + ], +) + +DECOMPOSE_1_FLOW_INTO_2_FLOW = model( + id="Consumption aggregation model", + variables=[ + float_variable("input1"), + float_variable("input2"), + ], + ports=[ + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDI1"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDI2"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDO"), + ], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("FlowDI1", "flow"), + definition=var("input1"), + ), + PortFieldDefinition( + port_field=PortFieldId("FlowDI2", 
"flow"), + definition=var("input2"), + ), + ], + binding_constraints=[ + Constraint( + name="Conversion", + expression=var("input1") + var("input2") + == port_field("FlowDO", "flow").sum_connections(), + ) + ], +) + +CONVERTOR_RECEIVE_IN = model( + id="Convertor model", + parameters=[float_parameter("alpha")], + variables=[ + float_variable("input", lower_bound=literal(0)), + ], + ports=[ + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDI"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowDO"), + ], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("FlowDO", "flow"), + definition=var("input") * param("alpha"), + ), + ], + binding_constraints=[ + Constraint( + name="Conversion", + expression=var("input") == port_field("FlowDI", "flow").sum_connections(), + ) + ], +) + +""" +CO² emmission port +""" +EMISSION_PORT = PortType(id="emission_port", fields=[PortField("Q")]) + +""" +Model of a simple power generator that takes account of CO² emissions related to the production. +The power production p is bounded between p_min and p_max. +An emission factor is used to determine the CO² emission according to the production. +""" +C02_POWER_MODEL = model( + id="CO2 power", + parameters=[ + float_parameter("p_min", CONSTANT), + float_parameter("p_max", CONSTANT), + float_parameter("cost", CONSTANT), + float_parameter("emission_rate", CONSTANT), + ], + variables=[ + float_variable("p", lower_bound=param("p_min"), upper_bound=param("p_max")) + ], + ports=[ + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="FlowP"), + ModelPort(port_type=EMISSION_PORT, port_name="OutCO2"), + ], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("FlowP", "flow"), + definition=var("p"), + ), + PortFieldDefinition( + port_field=PortFieldId("OutCO2", "Q"), + definition=var("p") * param("emission_rate"), + ), + ], + objective_operational_contribution=(param("cost") * var("p")).sum().expec(), +) + +""" +Model of the CO² quota. 
+It takes a set of CO² emissions as input. It forces the sum of those emissions to be smaller than a predefined quota. +""" +QUOTA_CO2_MODEL = model( + id="QuotaCO2", + parameters=[float_parameter("quota", CONSTANT)], + ports=[ModelPort(port_type=EMISSION_PORT, port_name="emissionCO2")], + binding_constraints=[ + Constraint( + name="Bound CO2", + expression=port_field("emissionCO2", "Q").sum_connections() + <= param("quota"), + ) + ], +) + +NODE_BALANCE_MODEL_MOD = model( + id="NODE_BALANCE_MODEL_MOD", + variables=[float_variable("p")], + ports=[ + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port_n"), + ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port_e"), + ], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("balance_port_e", "flow"), + definition=var("p"), + ) + ], + binding_constraints=[ + Constraint( + name="Balance", + expression=var("p") + == port_field("balance_port_n", "flow").sum_connections(), + ) + ], +) + +SHORT_TERM_STORAGE_COMPLEX = model( + id="STS_COMPLEX", + parameters=[ + float_parameter("p_max_injection"), + float_parameter("p_max_withdrawal"), + float_parameter("level_min"), + float_parameter("level_max"), + float_parameter("inflows"), + float_parameter( + "efficiency" + ), # Should be constant, but time-dependent values should work as well + float_parameter("withdrawal_penality"), + float_parameter("level_penality"), + float_parameter("Pgrad+i_penality"), + float_parameter("Pgrad-i_penality"), + float_parameter("Pgrad+s_penality"), + float_parameter("Pgrad-s_penality"), + ], + variables=[ + float_variable( + "injection", lower_bound=literal(0), upper_bound=param("p_max_injection") + ), + float_variable( + "withdrawal", lower_bound=literal(0), upper_bound=param("p_max_withdrawal") + ), + float_variable( + "level", lower_bound=param("level_min"), upper_bound=param("level_max") + ), + float_variable("Pgrad+i", lower_bound=literal(0)), + float_variable("Pgrad-i", lower_bound=literal(0)), + 
float_variable("Pgrad+s", lower_bound=literal(0)), + float_variable("Pgrad-s", lower_bound=literal(0)), + ], + ports=[ModelPort(port_type=BALANCE_PORT_TYPE, port_name="balance_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId("balance_port", "flow"), + definition=var("withdrawal") - var("injection"), + ) + ], + constraints=[ + Constraint( + name="Level", + expression=var("level") + - var("level").shift(-1) + - param("efficiency") * var("injection") + + var("withdrawal") + == param("inflows"), + ), + Constraint( + "Pgrad+i min", + var("Pgrad+i") >= var("injection") - var("injection").shift(-1), + ), + Constraint( + "Pgrad-i min", + var("Pgrad-i") >= var("injection").shift(-1) - var("injection"), + ), + Constraint( + "Pgrad+s min", + var("Pgrad+s") >= var("withdrawal") - var("withdrawal").shift(-1), + ), + Constraint( + "Pgrad-s min", + var("Pgrad-s") >= var("withdrawal").shift(-1) - var("withdrawal"), + ), + ], + objective_operational_contribution=( + param("level_penality") * var("level") + + param("withdrawal_penality") * var("withdrawal") + + param("Pgrad+i_penality") * var("Pgrad+i") + + param("Pgrad-i_penality") * var("Pgrad-i") + + param("Pgrad+s_penality") * var("Pgrad+s") + + param("Pgrad-s_penality") * var("Pgrad-s") + ) + .sum() + .expec(), +) diff --git a/src/andromede/libs/standard_sc.yml b/src/andromede/libs/standard_sc.yml new file mode 100644 index 00000000..7646df5d --- /dev/null +++ b/src/andromede/libs/standard_sc.yml @@ -0,0 +1,210 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+library: + id: basic + description: Basic library + + port-types: + - id: flow + description: A port which transfers power flow + fields: + - name: flow + - id: emission + description: A port which transfers co2 emission + fields: + - name: emission + + models: + - id: convertor + description: A basic convertor model + parameters: + - name: alpha + time-dependent: false + scenario-dependent: false + variables: + - name: input + lower-bound: 0 + ports: + - name: input_port + type: flow + - name: output_port + type: flow + port-field-definitions: + - port: input_port + field: flow + definition: -input + - port: output_port + field: flow + definition: input * alpha + + - id: two_input_convertor + description: Two input convertor model + parameters: + - name: alpha1 + time-dependent: false + scenario-dependent: false + - name: alpha2 + time-dependent: false + scenario-dependent: false + variables: + - name: input1 + lower-bound: 0 + - name: input2 + lower-bound: 0 + ports: + - name: input_port1 + type: flow + - name: input_port2 + type: flow + - name: output_port + type: flow + port-field-definitions: + - port: input_port1 + field: flow + definition: -input1 + - port: input_port2 + field: flow + definition: -input2 + - port: output_port + field: flow + definition: input1 * alpha1 + input2 * alpha2 + + - id: decompose_1_flow_into_2_flow + description: A Consumption aggregation model + variables: + - name: input1 + - name: input2 + ports: + - name: input_port1 + type: flow + - name: input_port2 + type: flow + - name: output_port + type: flow + port-field-definitions: + - port: input_port1 + field: flow + definition: input1 + - port: input_port2 + field: flow + definition: input2 + binding-constraints: + - name: Conversion + expression: sum_connections(output_port.flow) = input1 + input2 + + - id: convertor_receive_in + description: A convertor model + parameters: + - name: alpha + time-dependent: false + scenario-dependent: false + variables: + - name: input + 
lower-bound: 0 + ports: + - name: input_port + type: flow + - name: output_port + type: flow + port-field-definitions: + - port: output_port + field: flow + definition: input * alpha + binding-constraints: + - name: Conversion + expression: sum_connections(input_port.flow) = input + + - id: node_mod + description: A node model with two ports + variables: + - name: p + ports: + - name: injection_port_n + type: flow + - name: injection_port_e + type: flow + port-field-definitions: + - port: injection_port_e + field: flow + definition: p + binding-constraints: + - name: balance + expression: sum_connections(injection_port_n.flow) = p + + - id: generator_with_co2 + description: generator model that emits CO2 + parameters: + - name: pmin + time-dependent: false + scenario-dependent: false + - name: pmax + time-dependent: false + scenario-dependent: false + - name: cost + time-dependent: false + scenario-dependent: false + - name: emission_rate + time-dependent: false + scenario-dependent: false + variables: + - name: p + lower-bound: pmin + upper-bound: pmax + ports: + - name: injection_port + type: flow + - name: co2_port + type: emission + port-field-definitions: + - port: injection_port + field: flow + definition: p + - port: co2_port + field: emission + definition: p * emission_rate + objective: expec(sum(cost * p)) + + - id: quota_co2 + description: A CO2 quota model + parameters: + - name: quota + time-dependent: false + scenario-dependent: false + ports: + - name: emission_port + type: emission + binding-constraints: + - name: bound_co2 + expression: sum_connections(emission_port.emission) <= quota + + - id: link + description: A link model + parameters: + - name: f_max + time-dependent: false + scenario-dependent: false + variables: + - name: flow + lower-bound: -f_max + upper-bound: f_max + ports: + - name: injection_port_from + type: flow + - name: injection_port_to + type: flow + port-field-definitions: + - port: injection_port_from + field: flow + 
definition: -flow + - port: injection_port_to + field: flow + definition: flow \ No newline at end of file diff --git a/src/andromede/model/common.py b/src/andromede/model/common.py index 07ebdcb4..5763f360 100644 --- a/src/andromede/model/common.py +++ b/src/andromede/model/common.py @@ -19,7 +19,7 @@ class ValueType(Enum): FLOAT = "FLOAT" INTEGER = "INTEGER" - # Needs more ? + BOOL = "BOOL" class ProblemContext(Enum): diff --git a/src/andromede/model/constraint.py b/src/andromede/model/constraint.py index 0f33aa7f..fe778446 100644 --- a/src/andromede/model/constraint.py +++ b/src/andromede/model/constraint.py @@ -14,6 +14,10 @@ from typing import Any from andromede.expression.degree import is_constant +from andromede.expression.equality import ( + expressions_equal, + expressions_equal_if_present, +) from andromede.expression.expression import ( Comparator, ComparisonNode, @@ -78,3 +82,13 @@ def __post_init__( def replicate(self, /, **changes: Any) -> "Constraint": return replace(self, **changes) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Constraint): + return False + return ( + self.name == other.name + and expressions_equal(self.expression, other.expression) + and expressions_equal_if_present(self.lower_bound, other.lower_bound) + and expressions_equal_if_present(self.upper_bound, other.upper_bound) + ) diff --git a/src/andromede/model/library.py b/src/andromede/model/library.py new file mode 100644 index 00000000..e0299154 --- /dev/null +++ b/src/andromede/model/library.py @@ -0,0 +1,31 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+from dataclasses import dataclass +from typing import Dict, Iterable + +from andromede.model import Model, PortType + + +@dataclass(frozen=True) +class Library: + port_types: Dict[str, PortType] + models: Dict[str, Model] + + +def library( + port_types: Iterable[PortType], + models: Iterable[Model], +) -> Library: + return Library( + port_types=dict((p.id, p) for p in port_types), + models=dict((m.id, m) for m in models), + ) diff --git a/src/andromede/model/parsing.py b/src/andromede/model/parsing.py new file mode 100644 index 00000000..ef039ef5 --- /dev/null +++ b/src/andromede/model/parsing.py @@ -0,0 +1,102 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +import typing +from typing import List, Optional + +from pydantic import BaseModel, Field, ValidationError +from yaml import safe_load + + +def parse_yaml_library(input: typing.TextIO) -> "InputLibrary": + tree = safe_load(input) + try: + return InputLibrary.model_validate(tree["library"]) + except ValidationError as e: + raise ValueError(f"An error occurred during parsing: {e}") + + +# Design note: actual parsing and validation is delegated to pydantic models +def _to_kebab(snake: str) -> str: + return snake.replace("_", "-") + + +class ModifiedBaseModel(BaseModel): + class Config: + alias_generator = _to_kebab + extra = "forbid" + + +class InputParameter(ModifiedBaseModel): + name: str + time_dependent: bool = False + scenario_dependent: bool = False + + +class InputVariable(ModifiedBaseModel): + name: str + time_dependent: bool = True + scenario_dependent: bool = True + lower_bound: Optional[str] = None + upper_bound: Optional[str] = None + variable_type: str = "float" + + class Config: 
+ alias_generator = _to_kebab + coerce_numbers_to_str = True + extra = "forbid" + + +class InputConstraint(ModifiedBaseModel): + name: str + expression: str + lower_bound: Optional[str] = None + upper_bound: Optional[str] = None + + +class InputField(ModifiedBaseModel): + name: str + + +class InputPortType(ModifiedBaseModel): + id: str + fields: List[InputField] = Field(default_factory=list) + description: Optional[str] = None + + +class InputModelPort(ModifiedBaseModel): + name: str + type: str + + +class InputPortFieldDefinition(ModifiedBaseModel): + port: str + field: str + definition: str + + +class InputModel(ModifiedBaseModel): + id: str + parameters: List[InputParameter] = Field(default_factory=list) + variables: List[InputVariable] = Field(default_factory=list) + ports: List[InputModelPort] = Field(default_factory=list) + port_field_definitions: List[InputPortFieldDefinition] = Field(default_factory=list) + binding_constraints: List[InputConstraint] = Field(default_factory=list) + constraints: List[InputConstraint] = Field(default_factory=list) + objective: Optional[str] = None + description: Optional[str] = None + + +class InputLibrary(ModifiedBaseModel): + id: str + port_types: List[InputPortType] = Field(default_factory=list) + models: List[InputModel] = Field(default_factory=list) + description: Optional[str] = None diff --git a/src/andromede/model/resolve_library.py b/src/andromede/model/resolve_library.py new file mode 100644 index 00000000..629a258c --- /dev/null +++ b/src/andromede/model/resolve_library.py @@ -0,0 +1,155 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+from typing import Dict, List, Optional + +from andromede.expression import ExpressionNode, literal +from andromede.expression.indexing_structure import IndexingStructure +from andromede.expression.parsing.parse_expression import ( + ModelIdentifiers, + parse_expression, +) +from andromede.model import ( + Constraint, + Model, + ModelPort, + Parameter, + PortField, + PortType, + ProblemContext, + ValueType, + Variable, + model, +) +from andromede.model.library import Library, library +from andromede.model.model import PortFieldDefinition, port_field_def +from andromede.model.parsing import ( + InputConstraint, + InputField, + InputLibrary, + InputModel, + InputModelPort, + InputParameter, + InputPortFieldDefinition, + InputPortType, + InputVariable, +) + + +def resolve_library( + input_lib: InputLibrary, preloaded_libraries: Optional[List[Library]] = None +) -> Library: + """ + Converts parsed data into an actually usable library of models. + + - resolves references between models and ports + - parses expressions and resolves references to variables/params + """ + if preloaded_libraries is None: + preloaded_libraries = [] + port_types = [_convert_port_type(p) for p in input_lib.port_types] + for lib in preloaded_libraries: + port_types.extend(lib.port_types.values()) + port_types_dict = dict((p.id, p) for p in port_types) + models = [_resolve_model(m, port_types_dict) for m in input_lib.models] + return library(port_types, models) + + +def _convert_field(field: InputField) -> PortField: + return PortField(name=field.name) + + +def _convert_port_type(port_type: InputPortType) -> PortType: + return PortType( + id=port_type.id, fields=[_convert_field(f) for f in port_type.fields] + ) + + +def _resolve_model(input_model: InputModel, port_types: Dict[str, PortType]) -> Model: + identifiers = ModelIdentifiers( + variables={v.name for v in input_model.variables}, + parameters={p.name for p in input_model.parameters}, + ) + return model( + id=input_model.id, + 
parameters=[_to_parameter(p) for p in input_model.parameters], + variables=[_to_variable(v, identifiers) for v in input_model.variables], + ports=[_resolve_model_port(p, port_types) for p in input_model.ports], + port_fields_definitions=[ + _resolve_field_definition(d, identifiers) + for d in input_model.port_field_definitions + ], + binding_constraints=[ + _to_constraint(c, identifiers) for c in input_model.binding_constraints + ], + constraints=[_to_constraint(c, identifiers) for c in input_model.constraints], + objective_operational_contribution=_to_expression_if_present( + input_model.objective, identifiers + ), + ) + + +def _resolve_model_port( + port: InputModelPort, port_types: Dict[str, PortType] +) -> ModelPort: + return ModelPort(port_name=port.name, port_type=port_types[port.type]) + + +def _resolve_field_definition( + definition: InputPortFieldDefinition, ids: ModelIdentifiers +) -> PortFieldDefinition: + return port_field_def( + port_name=definition.port, + field_name=definition.field, + definition=parse_expression(definition.definition, ids), + ) + + +def _to_parameter(param: InputParameter) -> Parameter: + return Parameter( + name=param.name, + type=ValueType.FLOAT, + structure=IndexingStructure(param.time_dependent, param.scenario_dependent), + ) + + +def _to_expression_if_present( + expr: Optional[str], identifiers: ModelIdentifiers +) -> Optional[ExpressionNode]: + if not expr: + return None + return parse_expression(expr, identifiers) + + +def _to_variable(var: InputVariable, identifiers: ModelIdentifiers) -> Variable: + return Variable( + name=var.name, + data_type={"float": ValueType.FLOAT, "integer": ValueType.INTEGER}[ + var.variable_type + ], + structure=IndexingStructure(var.time_dependent, var.scenario_dependent), + lower_bound=_to_expression_if_present(var.lower_bound, identifiers), + upper_bound=_to_expression_if_present(var.upper_bound, identifiers), + context=ProblemContext.OPERATIONAL, + ) + + +def _to_constraint( + constraint: 
InputConstraint, identifiers: ModelIdentifiers +) -> Constraint: + lb = _to_expression_if_present(constraint.lower_bound, identifiers) + ub = _to_expression_if_present(constraint.upper_bound, identifiers) + return Constraint( + name=constraint.name, + expression=parse_expression(constraint.expression, identifiers), + lower_bound=(lb if lb is not None else literal(-float("inf"))), + upper_bound=(ub if ub is not None else literal(float("inf"))), + ) diff --git a/src/andromede/model/variable.py b/src/andromede/model/variable.py index ca4ca0cd..7bc8a800 100644 --- a/src/andromede/model/variable.py +++ b/src/andromede/model/variable.py @@ -13,8 +13,9 @@ from dataclasses import dataclass, replace from typing import Any, Optional -from andromede.expression import ExpressionNode +from andromede.expression import ExpressionNode, literal from andromede.expression.degree import is_constant +from andromede.expression.equality import expressions_equal_if_present from andromede.expression.indexing_structure import IndexingStructure from andromede.model.common import ProblemContext, ValueType @@ -41,6 +42,17 @@ def __post_init__(self) -> None: def replicate(self, /, **changes: Any) -> "Variable": return replace(self, **changes) + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Variable): + return False + return ( + self.name == other.name + and self.data_type == other.data_type + and expressions_equal_if_present(self.lower_bound, other.lower_bound) + and expressions_equal_if_present(self.upper_bound, other.upper_bound) + and self.structure == other.structure + ) + def int_variable( name: str, @@ -54,6 +66,14 @@ def int_variable( ) +def bool_var( + name: str, + structure: IndexingStructure = IndexingStructure(True, True), + context: ProblemContext = ProblemContext.OPERATIONAL, +) -> Variable: + return Variable(name, ValueType.BOOL, literal(0), literal(1), structure, context) + + def float_variable( name: str, lower_bound: Optional[ExpressionNode] = None, diff 
--git a/src/andromede/simulation/optimization.py b/src/andromede/simulation/optimization.py index b53d71c7..c23ba378 100644 --- a/src/andromede/simulation/optimization.py +++ b/src/andromede/simulation/optimization.py @@ -15,6 +15,7 @@ into a mathematical optimization problem. """ +import math from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum @@ -36,6 +37,7 @@ from andromede.expression.port_resolver import PortFieldKey, resolve_port from andromede.expression.scenario_operator import Expectation from andromede.expression.time_operator import TimeEvaluation, TimeShift, TimeSum +from andromede.model.common import ValueType from andromede.model.constraint import Constraint from andromede.model.model import PortFieldId from andromede.simulation.linear_expression import LinearExpression, Term @@ -726,11 +728,10 @@ def _create_variables(self) -> None: model = component.model for model_var in self.context.build_strategy.get_variables(model): - var_indexing = IndexingStructure( - model_var.structure.time, model_var.structure.scenario - ) + var_indexing = model_var.structure instantiated_lb_expr = None instantiated_ub_expr = None + if model_var.lower_bound: instantiated_lb_expr = _instantiate_model_expression( model_var.lower_bound, component.id, self.context @@ -740,19 +741,16 @@ def _create_variables(self) -> None: model_var.upper_bound, component.id, self.context ) - # Set solver var name - # Externally, for the Solver, this variable will have a full name - # Internally, it will be indexed by a structure that takes into account - # the component id, variable name, timestep and scenario separately - solver_var_name: str = f"{model_var.name}" - if component.id: - solver_var_name = f"{component.id}_{solver_var_name}" - if self.context.tree_node: - solver_var_name = f"{self.context.tree_node}_{solver_var_name}" + var_name: str = f"{model_var.name}" + component_prefix = f"{component.id}_" if component.id else "" for block_timestep in 
self.context.get_time_indices(var_indexing): - if self.context.block_length() > 1: - solver_var_name = f"{solver_var_name}_t{block_timestep}" + block_suffix = ( + f"_t{block_timestep}" + if var_indexing.is_time_varying() + and (self.context.block_length() > 1) + else "" + ) for scenario in self.context.get_scenario_indices(var_indexing): lower_bound = -self.solver.infinity() @@ -766,15 +764,46 @@ instantiated_ub_expr ).get_value(block_timestep, scenario) - if self.context.scenarios > 1: - solver_var_name = f"{solver_var_name}_s{scenario}" - - # TODO: Add BoolVar or IntVar if the variable is specified to be integer or bool - solver_var = self.solver.NumVar( - lower_bound, - upper_bound, - solver_var_name, + scenario_suffix = ( + f"_s{scenario}" + if var_indexing.is_scenario_varying() + and (self.context.scenarios > 1) + else "" ) + + # Set solver var name + # Externally, for the Solver, this variable will have a full name + # Internally, it will be indexed by a structure that takes into account + # the component id, variable name, timestep and scenario separately + solver_var = None + solver_var_name = f"{component_prefix}{var_name}{block_suffix}{scenario_suffix}" + + if math.isclose(lower_bound, upper_bound): + raise ValueError( + f"Upper and lower bounds of variable {solver_var_name} have the same value: {lower_bound}" + ) + elif lower_bound > upper_bound: + raise ValueError( + f"Upper bound ({upper_bound}) must be strictly greater than lower bound ({lower_bound}) for variable {solver_var_name}" + ) + + if model_var.data_type == ValueType.BOOL: + solver_var = self.solver.BoolVar( + solver_var_name, + ) + elif model_var.data_type == ValueType.INTEGER: + solver_var = self.solver.IntVar( + lower_bound, + upper_bound, + solver_var_name, + ) + else: + solver_var = self.solver.NumVar( + lower_bound, + upper_bound, + solver_var_name, + ) + component_context.add_variable( block_timestep, scenario, model_var.name, solver_var ) diff --git 
a/src/andromede/study/data.py b/src/andromede/study/data.py index db4f0135..c4822e1a 100644 --- a/src/andromede/study/data.py +++ b/src/andromede/study/data.py @@ -9,11 +9,13 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. - from abc import ABC, abstractmethod from dataclasses import dataclass +from pathlib import Path from typing import Dict, Mapping, Optional +import pandas as pd + from andromede.study.network import Network @@ -106,6 +108,18 @@ def check_requirement(self, time: bool, scenario: bool) -> bool: return scenario +def load_ts_from_txt( + timeseries_name: Optional[str], path_to_file: Optional[Path] +) -> pd.DataFrame: + if path_to_file is not None and timeseries_name is not None: + timeseries_with_extension = timeseries_name + ".txt" + ts_path = path_to_file / timeseries_with_extension + try: + return pd.read_csv(ts_path, header=None, sep=r"\s+") + except Exception: + raise Exception(f"An error has arrived when processing '{ts_path}'") + + @dataclass(frozen=True) class TimeScenarioSeriesData(AbstractDataStructure): """ @@ -114,10 +128,11 @@ class TimeScenarioSeriesData(AbstractDataStructure): can be defined by referencing one of those timeseries by its ID. 
""" - time_scenario_series: Mapping[TimeScenarioIndex, float] + time_scenario_series: pd.DataFrame def get_value(self, timestep: int, scenario: int, node_id: str = "") -> float: - return self.time_scenario_series[TimeScenarioIndex(timestep, scenario)] + value = str(self.time_scenario_series.iloc[timestep, scenario]) + return float(value) def check_requirement(self, time: bool, scenario: bool) -> bool: if not isinstance(self, TimeScenarioSeriesData): diff --git a/src/andromede/study/parsing.py b/src/andromede/study/parsing.py new file mode 100644 index 00000000..6f3723fe --- /dev/null +++ b/src/andromede/study/parsing.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+import typing +from typing import List, Optional + +from pydantic import BaseModel, Field +from yaml import safe_load + + +def parse_yaml_components(input_components: typing.TextIO) -> "InputComponents": + tree = safe_load(input_components) + return InputComponents.model_validate(tree["study"]) + + +# Design note: actual parsing and validation is delegated to pydantic models +def _to_kebab(snake: str) -> str: + return snake.replace("_", "-") + + +class InputPortConnections(BaseModel): + component1: str + port_1: str + component2: str + port_2: str + + +class InputComponentParameter(BaseModel): + name: str + type: str + value: Optional[float] = None + timeseries: Optional[str] = None + + +class InputComponent(BaseModel): + id: str + model: str + parameters: Optional[List[InputComponentParameter]] = None + + +class InputComponents(BaseModel): + nodes: List[InputComponent] = Field(default_factory=list) + components: List[InputComponent] = Field(default_factory=list) + connections: List[InputPortConnections] = Field(default_factory=list) + + class Config: + alias_generator = _to_kebab diff --git a/src/andromede/study/resolve_components.py b/src/andromede/study/resolve_components.py new file mode 100644 index 00000000..d6650e78 --- /dev/null +++ b/src/andromede/study/resolve_components.py @@ -0,0 +1,170 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+from dataclasses import dataclass +from pathlib import Path +from typing import Dict, Iterable, List, Optional + +import pandas as pd + +from andromede.model import Model +from andromede.model.library import Library +from andromede.study import ( + Component, + ConstantData, + DataBase, + Network, + Node, + PortRef, + PortsConnection, +) +from andromede.study.data import ( + AbstractDataStructure, + TimeScenarioIndex, + TimeScenarioSeriesData, + load_ts_from_txt, +) +from andromede.study.parsing import ( + InputComponent, + InputComponents, + InputPortConnections, +) + + +@dataclass(frozen=True) +class NetworkComponents: + components: Dict[str, Component] + nodes: Dict[str, Component] + connections: List[PortsConnection] + + +def network_components( + components_list: Iterable[Component], + nodes: Iterable[Component], + connections: Iterable[PortsConnection], +) -> NetworkComponents: + return NetworkComponents( + components=dict((m.id, m) for m in components_list), + nodes=dict((n.id, n) for n in nodes), + connections=list(connections), + ) + + +def resolve_components_and_cnx( + input_comp: InputComponents, library: Library +) -> NetworkComponents: + """ + Resolves: + - components to be used for study + - connections between components""" + components_list = [_resolve_component(library, m) for m in input_comp.components] + nodes = [_resolve_component(library, n) for n in input_comp.nodes] + all_components: List[Component] = components_list + nodes + connections = [] + for cnx in input_comp.connections: + resolved_cnx = _resolve_connections(cnx, all_components) + connections.append(resolved_cnx) + + return network_components(components_list, nodes, connections) + + +def _resolve_component(library: Library, component: InputComponent) -> Component: + model = library.models[component.model] + + return Component( + model=model, + id=component.id, + ) + + +def _resolve_connections( + connection: InputPortConnections, + all_components: List[Component], +) -> 
PortsConnection: + cnx_component1 = connection.component1 + cnx_component2 = connection.component2 + port1 = connection.port_1 + port2 = connection.port_2 + + component_1 = _get_component_by_id(all_components, cnx_component1) + component_2 = _get_component_by_id(all_components, cnx_component2) + assert component_1 is not None and component_2 is not None + port_ref_1 = PortRef(component_1, port1) + port_ref_2 = PortRef(component_2, port2) + + return PortsConnection(port_ref_1, port_ref_2) + + +def _get_component_by_id( + all_components: List[Component], component_id: str +) -> Optional[Component]: + components_dict = {component.id: component for component in all_components} + return components_dict.get(component_id) + + +def consistency_check( + input_components: Dict[str, Component], input_models: Dict[str, Model] +) -> bool: + """ + Checks if all components in the Components instances have a valid model from the library. + Returns True if all components are consistent, raises ValueError otherwise. 
+ """ + model_ids_set = input_models.keys() + for component_id, component in input_components.items(): + if component.model.id not in model_ids_set: + raise ValueError( + f"Error: Component {component_id} has invalid model ID: {component.model.id}" + ) + return True + + +def build_network(comp_network: NetworkComponents) -> Network: + network = Network("study") + + for node_id, node in comp_network.nodes.items(): + node = Node(model=node.model, id=node_id) + network.add_node(node) + + for component_id, component in comp_network.components.items(): + network.add_component(component) + + for connection in comp_network.connections: + network.connect(connection.port1, connection.port2) + return network + + +def build_data_base( + input_comp: InputComponents, timeseries_dir: Optional[Path] +) -> DataBase: + database = DataBase() + for comp in input_comp.components: + for param in comp.parameters or []: + param_value = _evaluate_param_type( + param.type, param.value, param.timeseries, timeseries_dir + ) + database.add_data(comp.id, param.name, param_value) + + return database + + +def _evaluate_param_type( + param_type: str, + param_value: Optional[float], + timeseries_name: Optional[str], + timeseries_dir: Optional[Path], +) -> AbstractDataStructure: + if param_type == "constant" and param_value is not None: + return ConstantData(float(param_value)) + + elif param_type == "timeseries": + return TimeScenarioSeriesData(load_ts_from_txt(timeseries_name, timeseries_dir)) + + raise ValueError(f"Data should be either constant or timeseries ") diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py new file mode 100644 index 00000000..9d443ee1 --- /dev/null +++ b/tests/functional/conftest.py @@ -0,0 +1,33 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +from pathlib import Path + +import pytest + +from andromede.model.parsing import parse_yaml_library +from andromede.model.resolve_library import resolve_library + + +@pytest.fixture(scope="session") +def libs_dir() -> Path: + return Path(__file__).parent / "libs" + + +@pytest.fixture(scope="session") +def lib(libs_dir: Path): + lib_file = libs_dir / "lib.yml" + + with lib_file.open() as f: + input_lib = parse_yaml_library(f) + + lib = resolve_library(input_lib) + return lib diff --git a/tests/functional/libs/lib.yml b/tests/functional/libs/lib.yml new file mode 100644 index 00000000..27a8b47e --- /dev/null +++ b/tests/functional/libs/lib.yml @@ -0,0 +1,188 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+library: + id: basic + description: Basic library + + port-types: + - id: flow + description: A port which transfers power flow + fields: + - name: flow + + models: + - id: node + ports: + - name: balance_port + type: flow + binding-constraints: + - name: balance + expression: sum_connections(balance_port.flow) = 0 + + - id: demand + parameters: + - name: demand + time-dependent: true + scenario-dependent: true + ports: + - name: balance_port + type: flow + port-field-definitions: + - port: balance_port + field: flow + definition: -demand + + - id: production + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + - name: p_max + time-dependent: false + scenario-dependent: false + variables: + - name: generation + lower-bound: 0 + upper-bound: p_max + ports: + - name: balance_port + type: flow + port-field-definitions: + - port: balance_port + field: flow + definition: generation + objective: expec(sum(cost * generation)) + + - id: production_with_min + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + - name: p_max + time-dependent: false + scenario-dependent: false + - name: p_min + time-dependent: false + scenario-dependent: false + variables: + - name: generation + lower-bound: p_min + upper-bound: p_max + ports: + - name: balance_port + type: flow + port-field-definitions: + - port: balance_port + field: flow + definition: generation + objective: expec(sum(cost * generation)) + + - id: link + parameters: + - name: f_max + time-dependent: false + scenario-dependent: false + variables: + - name: input + lower-bound: -f_max + upper-bound: f_max + ports: + - name: out_port + type: flow + - name: in_port + type: flow + port-field-definitions: + - port: out_port + field: flow + definition: input + - port: in_port + field: flow + definition: -input + + - id: spillage + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + variables: + - name: input + lower-bound: 0 + ports: + - 
name: balance_port + type: flow + port-field-definitions: + - port: balance_port + field: flow + definition: -input + objective: expec(sum(cost * input)) + + - id: unsuplied + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + variables: + - name: output + lower-bound: 0 + ports: + - name: balance_port + type: flow + port-field-definitions: + - port: balance_port + field: flow + definition: output + objective: expec(sum(cost * output)) + + - id: thermal_cluster + parameters: + - name: p_max + - name: p_min + - name: cost + - name: d_min_up + - name: d_min_down + - name: nb_units_max + - name: nb_failures + variables: + - name: nb_units_on + lower-bound: 0 + upper-bound: nb_units_max + variable-type: integer + - name: nb_starting + lower-bound: 0 + upper-bound: nb_units_max + variable-type: integer + - name: nb_stoping + lower-bound: 0 + upper-bound: nb_units_max + variable-type: integer + - name: production + lower-bound: 0 + upper-bound: nb_units_max * p_max + ports: + - name: balance_port + type: flow + port-field-definitions: + - port: balance_port + field: flow + definition: production + constraints: + - name: max production + expression: production <= nb_units_on * p_max + - name: min production + expression: production >= nb_units_on * p_min + - name: on units variation + expression: nb_units_on = nb_units_on[t-1] + nb_starting - nb_stoping + - name: starting time + expression: sum(nb_starting[t-d_min_up + 1 .. t]) <= nb_units_on + - name: stoping time + expression: sum(nb_stoping[t-d_min_down + 1 .. t]) <= nb_units_max - nb_units_on + objective: expec(sum(cost * production)) \ No newline at end of file diff --git a/tests/functional/test_andromede.py b/tests/functional/test_andromede.py index 300ff88d..f3a3aa36 100644 --- a/tests/functional/test_andromede.py +++ b/tests/functional/test_andromede.py @@ -10,6 +10,7 @@ # # This file is part of the Antares project. 
+import pandas as pd import pytest from andromede.expression import literal, param, var @@ -46,166 +47,6 @@ ) -def test_network() -> None: - network = Network("test") - assert network.id == "test" - assert list(network.nodes) == [] - assert list(network.components) == [] - assert list(network.all_components) == [] - assert list(network.connections) == [] - - with pytest.raises(KeyError): - network.get_node("N") - - N1 = Node(model=NODE_BALANCE_MODEL, id="N1") - N2 = Node(model=NODE_BALANCE_MODEL, id="N2") - network.add_node(N1) - network.add_node(N2) - assert list(network.nodes) == [N1, N2] - assert network.get_node(N1.id) == N1 - assert network.get_component("N1") == Node(model=NODE_BALANCE_MODEL, id="N1") - with pytest.raises(KeyError): - network.get_component("unknown") - - -def test_basic_balance() -> None: - """ - Balance on one node with one fixed demand and one generation, on 1 timestep. - """ - - database = DataBase() - database.add_data("D", "demand", ConstantData(100)) - - database.add_data("G", "p_max", ConstantData(100)) - database.add_data("G", "cost", ConstantData(30)) - - node = Node(model=NODE_BALANCE_MODEL, id="N") - demand = create_component( - model=DEMAND_MODEL, - id="D", - ) - - gen = create_component( - model=GENERATOR_MODEL, - id="G", - ) - - network = Network("test") - network.add_node(node) - network.add_component(demand) - network.add_component(gen) - network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(gen, "balance_port"), PortRef(node, "balance_port")) - - scenarios = 1 - problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) - status = problem.solver.Solve() - - assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == 3000 - - -def test_link() -> None: - """ - Balance on one node with one fixed demand and one generation, on 1 timestep. 
- """ - - database = DataBase() - database.add_data("D", "demand", ConstantData(100)) - - database.add_data("G", "p_max", ConstantData(100)) - database.add_data("G", "cost", ConstantData(35)) - - database.add_data("L", "f_max", ConstantData(150)) - - node1 = Node(model=NODE_BALANCE_MODEL, id="1") - node2 = Node(model=NODE_BALANCE_MODEL, id="2") - demand = create_component( - model=DEMAND_MODEL, - id="D", - ) - gen = create_component( - model=GENERATOR_MODEL, - id="G", - ) - link = create_component( - model=LINK_MODEL, - id="L", - ) - - network = Network("test") - network.add_node(node1) - network.add_node(node2) - network.add_component(demand) - network.add_component(gen) - network.add_component(link) - network.connect(PortRef(demand, "balance_port"), PortRef(node1, "balance_port")) - network.connect(PortRef(gen, "balance_port"), PortRef(node2, "balance_port")) - network.connect(PortRef(link, "balance_port_from"), PortRef(node1, "balance_port")) - network.connect(PortRef(link, "balance_port_to"), PortRef(node2, "balance_port")) - - scenarios = 1 - problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) - status = problem.solver.Solve() - - assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == 3500 - - for variable in problem.solver.variables(): - if "balance_port_from" in variable.name(): - assert variable.solution_value() == 100 - if "balance_port_to" in variable.name(): - assert variable.solution_value() == -100 - - -def test_stacking_generation() -> None: - """ - Balance on one node with one fixed demand and 2 generations with different costs, on 1 timestep. 
- """ - - database = DataBase() - database.add_data("D", "demand", ConstantData(150)) - - database.add_data("G1", "p_max", ConstantData(100)) - database.add_data("G1", "cost", ConstantData(30)) - - database.add_data("G2", "p_max", ConstantData(100)) - database.add_data("G2", "cost", ConstantData(50)) - - node1 = Node(model=NODE_BALANCE_MODEL, id="1") - - demand = create_component( - model=DEMAND_MODEL, - id="D", - ) - - gen1 = create_component( - model=GENERATOR_MODEL, - id="G1", - ) - - gen2 = create_component( - model=GENERATOR_MODEL, - id="G2", - ) - - network = Network("test") - network.add_node(node1) - network.add_component(demand) - network.add_component(gen1) - network.add_component(gen2) - network.connect(PortRef(demand, "balance_port"), PortRef(node1, "balance_port")) - network.connect(PortRef(gen1, "balance_port"), PortRef(node1, "balance_port")) - network.connect(PortRef(gen2, "balance_port"), PortRef(node1, "balance_port")) - - scenarios = 1 - problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) - status = problem.solver.Solve() - - assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == 30 * 100 + 50 * 50 - - def test_timeseries() -> None: """ Basic case with 2 timesteps, where the demand is 100 on first timestep and 50 on second timestep. 
@@ -215,11 +56,17 @@ def test_timeseries() -> None: database.add_data("G", "p_max", ConstantData(100)) database.add_data("G", "cost", ConstantData(30)) - - demand_data = TimeScenarioSeriesData( - {TimeScenarioIndex(0, 0): 100, TimeScenarioIndex(1, 0): 50} + demand_data = pd.DataFrame( + [ + [100], + [50], + ], + index=[0, 1], + columns=[0], ) - database.add_data("D", "demand", demand_data) + + demand_time_scenario_series = TimeScenarioSeriesData(demand_data) + database.add_data("D", "demand", demand_time_scenario_series) node = Node(model=NODE_BALANCE_MODEL, id="1") demand = create_component( @@ -326,141 +173,21 @@ def test_variable_bound() -> None: status = problem.solver.Solve() assert status == problem.solver.INFEASIBLE # Infeasible + network = create_one_node_network(generator_model) + database = create_simple_database(max_generation=0) # Equal upper and lower bounds + with pytest.raises( + ValueError, + match="Upper and lower bounds of variable G_generation have the same value: 0", + ): + problem = build_problem(network, database, TimeBlock(1, [0]), 1) -def test_spillage() -> None: - """ - Balance on one node with one fixed demand and 1 generation higher than demand and 1 timestep . 
- """ - - database = DataBase() - database.add_data("D", "demand", ConstantData(150)) - database.add_data("S", "cost", ConstantData(10)) - - database.add_data("G1", "p_max", ConstantData(300)) - database.add_data("G1", "p_min", ConstantData(200)) - database.add_data("G1", "cost", ConstantData(30)) - - node = Node(model=NODE_BALANCE_MODEL, id="1") - spillage = create_component(model=SPILLAGE_MODEL, id="S") - demand = create_component(model=DEMAND_MODEL, id="D") - - gen1 = create_component( - model=GENERATOR_MODEL_WITH_PMIN, - id="G1", - ) - - network = Network("test") - network.add_node(node) - network.add_component(demand) - network.add_component(gen1) - network.add_component(spillage) - network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(gen1, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(spillage, "balance_port"), PortRef(node, "balance_port")) - - problem = build_problem(network, database, TimeBlock(0, [1]), 1) - status = problem.solver.Solve() - - assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == 30 * 200 + 50 * 10 - - -def test_min_up_down_times() -> None: - """ - Model on 3 time steps with one thermal generation and one demand on a single node. - - Demand is the following time series : [500 MW, 0, 0] - - Thermal generation is characterized with: - - P_min = 100 MW - - P_max = 500 MW - - Min up/down time = 3 - - Generation cost = 100€ / MWh - - Unsupplied energy = 3000 €/MWh - - Spillage = 10 €/MWh - - The optimal solution consists is turning on the thermal plant, which must then stay on for the 3 timesteps and producing [500, 100, 100] to satisfy P_min constraints. 
- - The optimal cost is then : - 500 x 100 (prod step 1) - + 100 x 100 (prod step 2) - + 100 x 100 (prod step 3) - + 100 x 10 (spillage step 2) - + 100 x 10 (spillage step 3) - = 72 000 - """ - - database = DataBase() - - database.add_data("G", "p_max", ConstantData(500)) - database.add_data("G", "p_min", ConstantData(100)) - database.add_data("G", "cost", ConstantData(100)) - database.add_data("G", "d_min_up", ConstantData(3)) - database.add_data("G", "d_min_down", ConstantData(3)) - database.add_data("G", "nb_units_max", ConstantData(1)) - database.add_data("G", "nb_failures", ConstantData(0)) - - database.add_data("U", "cost", ConstantData(3000)) - database.add_data("S", "cost", ConstantData(10)) - - demand_data = TimeScenarioSeriesData( - { - TimeScenarioIndex(0, 0): 500, - TimeScenarioIndex(1, 0): 0, - TimeScenarioIndex(2, 0): 0, - } - ) - database.add_data("D", "demand", demand_data) - - time_block = TimeBlock(1, [0, 1, 2]) - scenarios = 1 - - node = Node(model=NODE_BALANCE_MODEL, id="1") - demand = create_component(model=DEMAND_MODEL, id="D") - - gen = create_component(model=THERMAL_CLUSTER_MODEL_HD, id="G") - - spillage = create_component(model=SPILLAGE_MODEL, id="S") - - unsupplied_energy = create_component(model=UNSUPPLIED_ENERGY_MODEL, id="U") - - network = Network("test") - network.add_node(node) - network.add_component(demand) - network.add_component(gen) - network.add_component(spillage) - network.add_component(unsupplied_energy) - network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(gen, "balance_port"), PortRef(node, "balance_port")) - network.connect(PortRef(spillage, "balance_port"), PortRef(node, "balance_port")) - network.connect( - PortRef(unsupplied_energy, "balance_port"), PortRef(node, "balance_port") - ) - - problem = build_problem( - network, - database, - time_block, - scenarios, - border_management=BlockBorderManagement.CYCLE, - ) - status = problem.solver.Solve() - - assert status == 
problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == 72000 - - output = OutputValues(problem) - expected_output = OutputValues() - expected_output.component("G").var("generation").value = [[500.0, 100.0, 100.0]] - expected_output.component("G").var("nb_on").value = [[1.0, 1.0, 1.0]] - expected_output.component("G").var("nb_start").value = [[-0.0, 0.0, 0.0]] - expected_output.component("G").var("nb_stop").value = [[0.0, 0.0, 0.0]] - - expected_output.component("S").var("spillage").value = [[0.0, 100.0, 100.0]] - expected_output.component("U").var("unsupplied_energy").value = [[0.0, 0.0, 0.0]] - - # TODO this test should pass with the next port implementation - # assert output == expected_output, f"Output differs from expected: {output}" - - print(f"Variables values: {output}") + network = create_one_node_network(generator_model) + database = create_simple_database(max_generation=-10) + with pytest.raises( + ValueError, + match=r"Upper bound \(-10\) must be strictly greater than lower bound \(0\) for variable G_generation", + ): + problem = build_problem(network, database, TimeBlock(1, [0]), 1) def generate_data( @@ -473,7 +200,10 @@ def generate_data( data[TimeScenarioIndex(absolute_timestep, scenario)] = -18 else: data[TimeScenarioIndex(absolute_timestep, scenario)] = 2 * efficiency - return TimeScenarioSeriesData(time_scenario_series=data) + + values = [value for value in data.values()] + data_df = pd.DataFrame(values, columns=["Value"]) + return TimeScenarioSeriesData(data_df) def short_term_storage_base(efficiency: float, horizon: int) -> None: @@ -530,7 +260,7 @@ def short_term_storage_base(efficiency: float, horizon: int) -> None: # The short-term storage should satisfy the load # No spillage / unsupplied energy is expected - assert problem.solver.Objective().Value() == 0 + assert problem.solver.Objective().Value() == pytest.approx(0, abs=0.01) count_variables = 0 for variable in problem.solver.variables(): diff --git 
a/tests/functional/test_andromede_yml.py b/tests/functional/test_andromede_yml.py new file mode 100644 index 00000000..c959f0f4 --- /dev/null +++ b/tests/functional/test_andromede_yml.py @@ -0,0 +1,483 @@ +import pandas as pd +import pytest + +from andromede.expression import literal, param, var +from andromede.expression.indexing_structure import IndexingStructure +from andromede.model import Model, ModelPort, float_parameter, float_variable, model +from andromede.model.model import PortFieldDefinition, PortFieldId +from andromede.simulation import ( + BlockBorderManagement, + OutputValues, + TimeBlock, + build_problem, +) +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + TimeScenarioIndex, + TimeScenarioSeriesData, + create_component, +) + + +def test_network(lib) -> None: + network = Network("test") + assert network.id == "test" + assert list(network.nodes) == [] + assert list(network.components) == [] + assert list(network.all_components) == [] + assert list(network.connections) == [] + + with pytest.raises(KeyError): + network.get_node("N") + + node_model = lib.models["node"] + + N1 = Node(model=node_model, id="N1") + N2 = Node(model=node_model, id="N2") + network.add_node(N1) + network.add_node(N2) + assert list(network.nodes) == [N1, N2] + assert network.get_node(N1.id) == N1 + assert network.get_component("N1") == Node(model=node_model, id="N1") + with pytest.raises(KeyError): + network.get_component("unknown") + + +def test_basic_balance(lib) -> None: + """ + Balance on one node with one fixed demand and one generation, on 1 timestep. 
+ """ + + database = DataBase() + database.add_data("D", "demand", ConstantData(100)) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(30)) + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + production_model = lib.models["production"] + + node = Node(model=node_model, id="N") + demand = create_component( + model=demand_model, + id="D", + ) + + gen = create_component( + model=production_model, + id="G", + ) + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(gen) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(gen, "balance_port"), PortRef(node, "balance_port")) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 3000 + + +def test_link(lib) -> None: + """ + Balance on one node with one fixed demand and one generation, on 1 timestep. 
+ """ + + database = DataBase() + database.add_data("D", "demand", ConstantData(100)) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(35)) + + database.add_data("L", "f_max", ConstantData(150)) + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + production_model = lib.models["production"] + link_model = lib.models["link"] + + node1 = Node(model=node_model, id="1") + node2 = Node(model=node_model, id="2") + demand = create_component( + model=demand_model, + id="D", + ) + gen = create_component( + model=production_model, + id="G", + ) + link = create_component( + model=link_model, + id="L", + ) + + network = Network("test") + network.add_node(node1) + network.add_node(node2) + network.add_component(demand) + network.add_component(gen) + network.add_component(link) + network.connect(PortRef(demand, "balance_port"), PortRef(node1, "balance_port")) + network.connect(PortRef(gen, "balance_port"), PortRef(node2, "balance_port")) + network.connect(PortRef(link, "in_port"), PortRef(node1, "balance_port")) + network.connect(PortRef(link, "out_port"), PortRef(node2, "balance_port")) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 3500 + + for variable in problem.solver.variables(): + if "balance_port_from" in variable.name(): + assert variable.solution_value() == 100 + if "balance_port_to" in variable.name(): + assert variable.solution_value() == -100 + + +def test_stacking_generation(lib) -> None: + """ + Balance on one node with one fixed demand and 2 generations with different costs, on 1 timestep. 
+ """ + + database = DataBase() + database.add_data("D", "demand", ConstantData(150)) + + database.add_data("G1", "p_max", ConstantData(100)) + database.add_data("G1", "cost", ConstantData(30)) + + database.add_data("G2", "p_max", ConstantData(100)) + database.add_data("G2", "cost", ConstantData(50)) + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + production_model = lib.models["production"] + + node1 = Node(model=node_model, id="1") + + demand = create_component( + model=demand_model, + id="D", + ) + + gen1 = create_component( + model=production_model, + id="G1", + ) + + gen2 = create_component( + model=production_model, + id="G2", + ) + + network = Network("test") + network.add_node(node1) + network.add_component(demand) + network.add_component(gen1) + network.add_component(gen2) + network.connect(PortRef(demand, "balance_port"), PortRef(node1, "balance_port")) + network.connect(PortRef(gen1, "balance_port"), PortRef(node1, "balance_port")) + network.connect(PortRef(gen2, "balance_port"), PortRef(node1, "balance_port")) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 30 * 100 + 50 * 50 + + +def test_spillage(lib) -> None: + """ + Balance on one node with one fixed demand and 1 generation higher than demand and 1 timestep . 
+ """ + + database = DataBase() + database.add_data("D", "demand", ConstantData(150)) + database.add_data("S", "cost", ConstantData(10)) + + database.add_data("G1", "p_max", ConstantData(300)) + database.add_data("G1", "p_min", ConstantData(200)) + database.add_data("G1", "cost", ConstantData(30)) + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + production_with_min_model = lib.models["production_with_min"] + spillage_model = lib.models["spillage"] + + node = Node(model=node_model, id="1") + spillage = create_component(model=spillage_model, id="S") + demand = create_component(model=demand_model, id="D") + + gen1 = create_component(model=production_with_min_model, id="G1") + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(gen1) + network.add_component(spillage) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(gen1, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(spillage, "balance_port"), PortRef(node, "balance_port")) + + problem = build_problem(network, database, TimeBlock(0, [1]), 1) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 30 * 200 + 50 * 10 + + +def test_min_up_down_times(lib) -> None: + """ + Model on 3 time steps with one thermal generation and one demand on a single node. + - Demand is the following time series : [500 MW, 0, 0] + - Thermal generation is characterized with: + - P_min = 100 MW + - P_max = 500 MW + - Min up/down time = 3 + - Generation cost = 100€ / MWh + - Unsupplied energy = 3000 €/MWh + - Spillage = 10 €/MWh + + The optimal solution consists is turning on the thermal plant, which must then stay on for the 3 timesteps and producing [500, 100, 100] to satisfy P_min constraints. 
+ + The optimal cost is then : + 500 x 100 (prod step 1) + + 100 x 100 (prod step 2) + + 100 x 100 (prod step 3) + + 100 x 10 (spillage step 2) + + 100 x 10 (spillage step 3) + = 72 000 + """ + + database = DataBase() + + database.add_data("G", "p_max", ConstantData(500)) + database.add_data("G", "p_min", ConstantData(100)) + database.add_data("G", "cost", ConstantData(100)) + database.add_data("G", "d_min_up", ConstantData(3)) + database.add_data("G", "d_min_down", ConstantData(3)) + database.add_data("G", "nb_units_max", ConstantData(1)) + database.add_data("G", "nb_failures", ConstantData(0)) + + database.add_data("U", "cost", ConstantData(3000)) + database.add_data("S", "cost", ConstantData(10)) + + demand_data = pd.DataFrame( + [ + [500], + [0], + [0], + ], + index=[0, 1, 2], + columns=[0], + ) + demand_time_scenario_series = TimeScenarioSeriesData(demand_data) + database.add_data("D", "demand", demand_time_scenario_series) + + time_block = TimeBlock(1, [0, 1, 2]) + scenarios = 1 + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + spillage_model = lib.models["spillage"] + unsuplied_model = lib.models["unsuplied"] + thermal_cluster = lib.models["thermal_cluster"] + + node = Node(model=node_model, id="1") + demand = create_component(model=demand_model, id="D") + + gen = create_component(model=thermal_cluster, id="G") + + spillage = create_component(model=spillage_model, id="S") + + unsupplied_energy = create_component(model=unsuplied_model, id="U") + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(gen) + network.add_component(spillage) + network.add_component(unsupplied_energy) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(gen, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(spillage, "balance_port"), PortRef(node, "balance_port")) + network.connect( + PortRef(unsupplied_energy, 
"balance_port"), PortRef(node, "balance_port") + ) + + problem = build_problem( + network, + database, + time_block, + scenarios, + border_management=BlockBorderManagement.CYCLE, + ) + status = problem.solver.Solve() + + print(OutputValues(problem).component("G").var("nb_units_on").value) + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == pytest.approx(72000, abs=0.01) + + +def test_changing_demand(lib) -> None: + """ + Model on 3 time steps simple production, demand + - P_max = 500 MW + - Generation cost = 100€ / MWh + """ + + database = DataBase() + + database.add_data("G", "p_max", ConstantData(500)) + database.add_data("G", "cost", ConstantData(100)) + + demand_data = pd.DataFrame( + [ + [300], + [100], + [0], + ], + index=[0, 1, 2], + columns=[0], + ) + demand_time_scenario_series = TimeScenarioSeriesData(demand_data) + database.add_data("D", "demand", demand_time_scenario_series) + + time_block = TimeBlock(1, [0, 1, 2]) + scenarios = 1 + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + production_model = lib.models["production"] + + node = Node(model=node_model, id="1") + demand = create_component(model=demand_model, id="D") + + prod = create_component(model=production_model, id="G") + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(prod) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(prod, "balance_port"), PortRef(node, "balance_port")) + + problem = build_problem( + network, + database, + time_block, + scenarios, + border_management=BlockBorderManagement.CYCLE, + ) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 40000 + + +def test_min_up_down_times_2(lib) -> None: + """ + Model on 3 time steps with one thermal generation and one demand on a single node. 
+ - Demand is the following time series : [500 MW, 0, 0] + - Thermal generation is characterized with: + - P_min = 100 MW + - P_max = 500 MW + - Min up/down time = 2 + - Generation cost = 100€ / MWh + - Unsupplied energy = 3000 €/MWh + - Spillage = 10 €/MWh + + The optimal solution consists is turning on the thermal plant, which must then stay on for the 3 timesteps and producing [500, 100, 100] to satisfy P_min constraints. + + The optimal cost is then : + 500 x 100 (prod step 1) + + 100 x 100 (prod step 2) + + 0 x 100 (prod step 3) + + 100 x 10 (spillage step 2) + + 0 x 10 (spillage step 3) + = 61 000 + """ + + database = DataBase() + + database.add_data("G", "p_max", ConstantData(500)) + database.add_data("G", "p_min", ConstantData(100)) + database.add_data("G", "cost", ConstantData(100)) + database.add_data("G", "d_min_up", ConstantData(2)) + database.add_data("G", "d_min_down", ConstantData(1)) + database.add_data("G", "nb_units_max", ConstantData(1)) + database.add_data("G", "nb_failures", ConstantData(0)) + + database.add_data("U", "cost", ConstantData(3000)) + database.add_data("S", "cost", ConstantData(10)) + + demand_data = pd.DataFrame( + [ + [500], + [0], + [0], + ], + index=[0, 1, 2], + columns=[0], + ) + demand_time_scenario_series = TimeScenarioSeriesData(demand_data) + database.add_data("D", "demand", demand_time_scenario_series) + + time_block = TimeBlock(1, [0, 1, 2]) + scenarios = 1 + + node_model = lib.models["node"] + demand_model = lib.models["demand"] + spillage_model = lib.models["spillage"] + unsuplied_model = lib.models["unsuplied"] + thermal_cluster = lib.models["thermal_cluster"] + + node = Node(model=node_model, id="1") + demand = create_component(model=demand_model, id="D") + + gen = create_component(model=thermal_cluster, id="G") + + spillage = create_component(model=spillage_model, id="S") + + unsupplied_energy = create_component(model=unsuplied_model, id="U") + + network = Network("test") + network.add_node(node) + 
network.add_component(demand) + network.add_component(gen) + network.add_component(spillage) + network.add_component(unsupplied_energy) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(gen, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(spillage, "balance_port"), PortRef(node, "balance_port")) + network.connect( + PortRef(unsupplied_energy, "balance_port"), PortRef(node, "balance_port") + ) + + problem = build_problem( + network, + database, + time_block, + scenarios, + border_management=BlockBorderManagement.CYCLE, + ) + status = problem.solver.Solve() + + print(problem.solver.ExportModelAsMpsFormat(fixed_format=False, obfuscated=False)) + print(OutputValues(problem).component("G").var("nb_units_on").value) + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == pytest.approx(61000) diff --git a/tests/functional/test_performance.py b/tests/functional/test_performance.py index 28d5d88d..1c50af1c 100644 --- a/tests/functional/test_performance.py +++ b/tests/functional/test_performance.py @@ -10,15 +10,20 @@ # # This file is part of the Antares project. 
-from andromede.expression.expression import literal, param +from typing import cast + +import pytest + +from andromede.expression.expression import ExpressionNode, literal, param, var from andromede.expression.indexing_structure import IndexingStructure from andromede.libs.standard import ( BALANCE_PORT_TYPE, DEMAND_MODEL, GENERATOR_MODEL, + GENERATOR_MODEL_WITH_STORAGE, NODE_BALANCE_MODEL, ) -from andromede.model import ModelPort, float_parameter, model +from andromede.model import float_parameter, float_variable, model from andromede.simulation import TimeBlock, build_problem from andromede.study import ( ConstantData, @@ -28,16 +33,18 @@ PortRef, create_component, ) -from tests.unittests.test_utils import generate_data +from tests.unittests.test_utils import generate_scalar_matrix_data -def test_large_sum_with_loop() -> None: - """ - Test performance when the problem involves an expression with a high number of terms. Here the objective function is the sum over nb_terms terms. +def test_large_sum_inside_model_with_loop() -> None: """ + Test performance when the problem involves an expression with a high number of terms. + Here the objective function is the sum over nb_terms terms on a for-loop inside the model - # This test pass with 476 terms but fails with 477 locally due to recursion depth, and even less terms are possible with Jenkins... - nb_terms = 100 + This test pass with 476 terms but fails with 477 locally due to recursion depth, + and even less terms are possible with Jenkins... 
+ """ + nb_terms = 500 time_blocks = [TimeBlock(0, [0])] scenarios = 1 @@ -46,38 +53,38 @@ def test_large_sum_with_loop() -> None: for i in range(1, nb_terms): database.add_data("simple_cost", f"cost_{i}", ConstantData(1 / i)) - SIMPLE_COST_MODEL = model( - id="SIMPLE_COST", - parameters=[ - float_parameter(f"cost_{i}", IndexingStructure(False, False)) - for i in range(1, nb_terms) - ], - objective_operational_contribution=sum( - [param(f"cost_{i}") for i in range(1, nb_terms)] - ), - ) - - network = Network("test") + with pytest.raises(RecursionError, match="maximum recursion depth exceeded"): + SIMPLE_COST_MODEL = model( + id="SIMPLE_COST", + parameters=[ + float_parameter(f"cost_{i}", IndexingStructure(False, False)) + for i in range(1, nb_terms) + ], + objective_operational_contribution=cast( + ExpressionNode, sum(param(f"cost_{i}") for i in range(1, nb_terms)) + ), + ) - # for i in range(1, nb_terms + 1): - cost_model = create_component(model=SIMPLE_COST_MODEL, id="simple_cost") - network.add_component(cost_model) + # Won't run because last statement will raise the error + network = Network("test") + cost_model = create_component(model=SIMPLE_COST_MODEL, id="simple_cost") + network.add_component(cost_model) - problem = build_problem(network, database, time_blocks[0], scenarios) - status = problem.solver.Solve() + problem = build_problem(network, database, time_blocks[0], scenarios) + status = problem.solver.Solve() - assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == sum( - [1 / i for i in range(1, nb_terms)] - ) + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == sum( + [1 / i for i in range(1, nb_terms)] + ) -def test_large_sum_outside_model() -> None: +def test_large_sum_outside_model_with_loop() -> None: """ - Test performance when the problem involves an expression with a high number of terms. Here the objective function is the sum over nb_terms terms. 
+ Test performance when the problem involves an expression with a high number of terms. + Here the objective function is the sum over nb_terms terms on a for-loop outside the model """ - - nb_terms = 10000 + nb_terms = 10_000 time_blocks = [TimeBlock(0, [0])] scenarios = 1 @@ -93,7 +100,6 @@ def test_large_sum_outside_model() -> None: network = Network("test") - # for i in range(1, nb_terms + 1): simple_model = create_component( model=SIMPLE_COST_MODEL, id="simple_cost", @@ -104,10 +110,101 @@ def test_large_sum_outside_model() -> None: status = problem.solver.Solve() assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == sum( - [1 / i for i in range(1, nb_terms)] + assert problem.solver.Objective().Value() == obj_coeff + + +def test_large_sum_inside_model_with_sum_operator() -> None: + """ + Test performance when the problem involves an expression with a high number of terms. + Here the objective function is the sum over nb_terms terms with the sum() operator inside the model + """ + nb_terms = 10_000 + + scenarios = 1 + time_blocks = [TimeBlock(0, list(range(nb_terms)))] + database = DataBase() + + # Weird values when the "cost" varies over time and we use the sum() operator: + # For testing purposes, will use a const value since the problem seems to come when + # we try to linearize nb_terms variables with nb_terms distinct parameters + # TODO check the sum() operator for time-variable parameters + database.add_data("simple_cost", "cost", ConstantData(3)) + + SIMPLE_COST_MODEL = model( + id="SIMPLE_COST", + parameters=[ + float_parameter("cost", IndexingStructure(False, False)), + ], + variables=[ + float_variable( + "var", + lower_bound=literal(1), + upper_bound=literal(2), + structure=IndexingStructure(True, False), + ), + ], + objective_operational_contribution=(param("cost") * var("var")).sum(), ) + network = Network("test") + + cost_model = create_component(model=SIMPLE_COST_MODEL, id="simple_cost") + 
network.add_component(cost_model) + + problem = build_problem(network, database, time_blocks[0], scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 3 * nb_terms + + +def test_large_sum_of_port_connections() -> None: + """ + Test performance when the problem involves a model where several generators are connected to a node. + + This test pass with 470 terms but fails with 471 locally due to recursion depth, + and possibly even less terms are possible with Jenkins... + """ + nb_generators = 500 + + time_block = TimeBlock(0, [0]) + scenarios = 1 + + database = DataBase() + database.add_data("D", "demand", ConstantData(nb_generators)) + + for gen_id in range(nb_generators): + database.add_data(f"G_{gen_id}", "p_max", ConstantData(1)) + database.add_data(f"G_{gen_id}", "cost", ConstantData(5)) + + node = Node(model=NODE_BALANCE_MODEL, id="N") + demand = create_component(model=DEMAND_MODEL, id="D") + generators = [ + create_component(model=GENERATOR_MODEL, id=f"G_{gen_id}") + for gen_id in range(nb_generators) + ] + + network = Network("test") + network.add_node(node) + + network.add_component(demand) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + + for gen_id in range(nb_generators): + network.add_component(generators[gen_id]) + network.connect( + PortRef(generators[gen_id], "balance_port"), PortRef(node, "balance_port") + ) + + with pytest.raises(RecursionError, match="maximum recursion depth exceeded"): + problem = build_problem(network, database, time_block, scenarios) + + # Won't run because last statement will raise the error + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 5 * nb_generators + def test_basic_balance_on_whole_year() -> None: """ @@ -119,7 +216,9 @@ def test_basic_balance_on_whole_year() -> None: time_block = TimeBlock(1, list(range(horizon))) database = 
DataBase() - database.add_data("D", "demand", generate_data(100, horizon, scenarios)) + database.add_data( + "D", "demand", generate_scalar_matrix_data(100, horizon, scenarios) + ) database.add_data("G", "p_max", ConstantData(100)) database.add_data("G", "cost", ConstantData(30)) @@ -140,4 +239,42 @@ def test_basic_balance_on_whole_year() -> None: status = problem.solver.Solve() assert status == problem.solver.OPTIMAL - assert problem.solver.Objective().Value() == 3000 * horizon + assert problem.solver.Objective().Value() == 30 * 100 * horizon + + +def test_basic_balance_on_whole_year_with_large_sum() -> None: + """ + Balance on one node with one fixed demand and one generation with storage, on 8760 timestep. + """ + + scenarios = 1 + horizon = 8760 + time_block = TimeBlock(1, list(range(horizon))) + + database = DataBase() + database.add_data( + "D", "demand", generate_scalar_matrix_data(100, horizon, scenarios) + ) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(30)) + database.add_data("G", "full_storage", ConstantData(100 * horizon)) + + node = Node(model=NODE_BALANCE_MODEL, id="N") + demand = create_component(model=DEMAND_MODEL, id="D") + gen = create_component( + model=GENERATOR_MODEL_WITH_STORAGE, id="G" + ) # Limits the total generation inside a TimeBlock + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(gen) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(gen, "balance_port"), PortRef(node, "balance_port")) + + problem = build_problem(network, database, time_block, scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 30 * 100 * horizon diff --git a/tests/functional/test_stochastic.py b/tests/functional/test_stochastic.py index d19914f1..97fc7971 100644 --- a/tests/functional/test_stochastic.py +++ 
b/tests/functional/test_stochastic.py @@ -29,7 +29,7 @@ PortRef, create_component, ) -from tests.unittests.test_utils import generate_data +from tests.unittests.test_utils import generate_scalar_matrix_data @pytest.fixture @@ -46,11 +46,19 @@ def scenarios() -> int: def database(horizon: int, scenarios: int) -> DataBase: database = DataBase() - database.add_data("D", "demand", generate_data(500, horizon, scenarios)) - - database.add_data("BASE", "nb_failures", generate_data(1, horizon, scenarios)) - database.add_data("SEMIBASE", "nb_failures", generate_data(1, horizon, scenarios)) - database.add_data("PEAK", "nb_failures", generate_data(1, horizon, scenarios)) + database.add_data( + "D", "demand", generate_scalar_matrix_data(500, horizon, scenarios) + ) + + database.add_data( + "BASE", "nb_failures", generate_scalar_matrix_data(1, horizon, scenarios) + ) + database.add_data( + "SEMIBASE", "nb_failures", generate_scalar_matrix_data(1, horizon, scenarios) + ) + database.add_data( + "PEAK", "nb_failures", generate_scalar_matrix_data(1, horizon, scenarios) + ) database.add_data("BASE", "p_max", ConstantData(250)) database.add_data("BASE", "p_min", ConstantData(100)) diff --git a/tests/functional/test_xpansion.py b/tests/functional/test_xpansion.py index b36307b3..ddf63e21 100644 --- a/tests/functional/test_xpansion.py +++ b/tests/functional/test_xpansion.py @@ -10,6 +10,7 @@ # # This file is part of the Antares project. 
+import pandas as pd import pytest from andromede.expression.expression import literal, param, var @@ -46,7 +47,6 @@ Network, Node, PortRef, - TimeScenarioIndex, TimeScenarioSeriesData, create_component, ) @@ -312,11 +312,7 @@ def test_generation_xpansion_two_time_steps_two_scenarios( horizon = 2 time_block = TimeBlock(1, list(range(horizon))) - data = {} - data[TimeScenarioIndex(0, 0)] = 300 - data[TimeScenarioIndex(1, 0)] = 500 - data[TimeScenarioIndex(0, 1)] = 200 - data[TimeScenarioIndex(1, 1)] = 400 + data = pd.DataFrame([[300, 200], [500, 400]], index=[0, 1], columns=[0, 1]) demand_data = TimeScenarioSeriesData(time_scenario_series=data) diff --git a/tests/integration/test_benders_decomposed.py b/tests/integration/test_benders_decomposed.py index 1990029a..4521084c 100644 --- a/tests/integration/test_benders_decomposed.py +++ b/tests/integration/test_benders_decomposed.py @@ -10,6 +10,7 @@ # # This file is part of the Antares project. +import pandas as pd import pytest from andromede.expression.expression import literal, param, var @@ -48,13 +49,15 @@ Network, Node, PortRef, + ScenarioIndex, + ScenarioSeriesData, TimeIndex, + TimeScenarioSeriesData, TimeSeriesData, create_component, ) CONSTANT = IndexingStructure(False, False) -FREE = IndexingStructure(True, True) INVESTMENT = ProblemContext.INVESTMENT OPERATIONAL = ProblemContext.OPERATIONAL @@ -116,7 +119,8 @@ def discrete_candidate() -> Model: structure=CONSTANT, context=COUPLING, ), - int_variable( + # TODO set it back to int_variable + float_variable( "nb_units", lower_bound=literal(0), upper_bound=literal(10), @@ -177,8 +181,8 @@ def test_benders_decomposed_integration( ) -> None: """ Simple generation expansion problem on one node, one timestep and one scenario - but this time with two candidates: one thermal cluster and one wind cluster. 
- We separate master/subproblem and export the problems in MPS format to be solved by the Benders solver in Xpansion + but this time with two candidates: one continuous and one discrete. + We separate master/subproblem and export the problems in MPS format to be solved by the Benders and MergeMPS Demand = 400 Generator : P_max : 200, Cost : 45 @@ -187,13 +191,13 @@ def test_benders_decomposed_integration( -> 200 of unsupplied energy -> Total cost without investment = 45 * 200 + 501 * 200 = 109_200 - Single candidate : Invest cost : 490 / MW; Prod cost : 10 - Cluster candidate : Invest cost : 200 / MW; Prod cost : 10; Nb of discrete thresholds: 10; Prod per threshold: 10 + Continuos candidate : Invest cost : 490 / MW; Prod cost : 10 + Discrete candidate : Invest cost : 200 / MW; Prod cost : 10; Nb of units: 10; Prod per unit: 10 - Optimal investment : 100 MW (Cluster) + 100 MW (Single) + Optimal investment : 100 MW (Discrete) + 100 MW (Continuos) - -> Optimal cost = 490 * 100 + 10 * 100 (Single) - + 200 * 100 + 10 * 100 (Cluster) + -> Optimal cost = 490 * 100 + 10 * 100 (Continuos) + + 200 * 100 + 10 * 100 (Discrete) + 45 * 200 (Generator) = 69_000 + 11_000 = 80_000 @@ -348,3 +352,184 @@ def test_benders_decomposed_multi_time_block_single_scenario( assert decomposed_solution.is_close( solution ), f"Solution differs from expected: {decomposed_solution}" + + +def test_benders_decomposed_single_time_block_multi_scenario( + generator: Component, + candidate: Component, +) -> None: + """ + Simple generation xpansion problem on one node. One time block with one timestep each, + two scenarios, one thermal cluster candidate. 
+ + Demand = [200; 300] + Generator : P_max : 200, Cost : 40 + Unsupplied energy : Cost : 1_000 + + -> [0; 100] of unsupplied energy + -> Total cost without investment = 0.5 * [(200 * 40)] + + 0.5 * [(200 * 40) + (100 * 1_000)] + = 58_000 + + Candidate : Invest cost : 480 / MW, Prod cost : 10 + + Optimal investment : 100 MW + + -> Optimal cost = 480 * 100 (investment) + + 0.5 * (10 * 100 + 40 * 100) (operational - scenario 1) + + 0.5 * (10 * 100 + 40 * 200) (operational - scenario 2) + = 55_000 + + """ + + data = {} + data[ScenarioIndex(0)] = 200 + data[ScenarioIndex(1)] = 300 + + demand_data = ScenarioSeriesData(scenario_series=data) + + database = DataBase() + database.add_data("D", "demand", demand_data) + + database.add_data("N", "spillage_cost", ConstantData(1)) + database.add_data("N", "ens_cost", ConstantData(1_000)) + + database.add_data("G1", "p_max", ConstantData(200)) + database.add_data("G1", "cost", ConstantData(40)) + + database.add_data("CAND", "op_cost", ConstantData(10)) + database.add_data("CAND", "invest_cost", ConstantData(480)) + + demand = create_component( + model=DEMAND_MODEL, + id="D", + ) + + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(generator) + network.add_component(candidate) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) + + scenarios = 2 + blocks = [TimeBlock(1, [0])] + + config = InterDecisionTimeScenarioConfig(blocks, scenarios) + decision_tree_root = DecisionTreeNode("", config, network) + + xpansion = build_benders_decomposed_problem(decision_tree_root, database) + + data = { + "solution": { + "overall_cost": 55_000, + "values": { + "CAND_p_max": 100, + }, + } + } + solution = BendersSolution(data) + + assert 
xpansion.run() + decomposed_solution = xpansion.solution + if decomposed_solution is not None: # For mypy only + assert decomposed_solution.is_close( + solution + ), f"Solution differs from expected: {decomposed_solution}" + + +def test_benders_decomposed_multi_time_block_multi_scenario( + generator: Component, + candidate: Component, +) -> None: + """ + Simple generation xpansion problem on one node. One time block with one timestep each, + two scenarios, one thermal cluster candidate. + + Demand = [200 200; 100 300] + Generator : P_max : 200, Cost : 40 + Unsupplied energy : Cost : 1_000 + + -> [0 0; 0 100] of unsupplied energy + -> Total cost without investment = 0.5 * [(200 * 40) + (200 * 40)] + + 0.5 * [(100 * 40) + (200 * 40 + 100 * 1_000)] + = 64_000 + + Candidate : Invest cost : 480 / MW, Prod cost : 10 + + Optimal investment : 100 MW + + -> Optimal cost = 480 * 100 (investment) + + 0.5 * (10 * 100 + 40 * 100) (operational - time block 1 scenario 1) + + 0.5 * (10 * 100 + 40 * 100) (operational - time block 2 scenario 1) + + 0.5 * (10 * 100) (operational - time block 1 scenario 2) + + 0.5 * (10 * 100 + 40 * 200) (operational - time block 2 scenario 2) + = 58_000 + + """ + + data = pd.DataFrame( + [ + [200, 200], + [100, 300], + ], + index=[0, 1], + columns=[0, 1], + ) + + demand_data = TimeScenarioSeriesData(time_scenario_series=data) + + database = DataBase() + database.add_data("D", "demand", demand_data) + + database.add_data("N", "spillage_cost", ConstantData(1)) + database.add_data("N", "ens_cost", ConstantData(1_000)) + + database.add_data("G1", "p_max", ConstantData(200)) + database.add_data("G1", "cost", ConstantData(40)) + + database.add_data("CAND", "op_cost", ConstantData(10)) + database.add_data("CAND", "invest_cost", ConstantData(480)) + + demand = create_component( + model=DEMAND_MODEL, + id="D", + ) + + node = Node(model=NODE_WITH_SPILL_AND_ENS, id="N") + network = Network("test") + network.add_node(node) + network.add_component(demand) + 
network.add_component(generator) + network.add_component(candidate) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(generator, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(candidate, "balance_port"), PortRef(node, "balance_port")) + + scenarios = 2 + blocks = [TimeBlock(1, [0]), TimeBlock(2, [1])] + + config = InterDecisionTimeScenarioConfig(blocks, scenarios) + decision_tree_root = DecisionTreeNode("", config, network) + + xpansion = build_benders_decomposed_problem(decision_tree_root, database) + + data = { + "solution": { + "overall_cost": 58_000, + "values": { + "CAND_p_max": 100, + }, + } + } + solution = BendersSolution(data) + + assert xpansion.run() + decomposed_solution = xpansion.solution + if decomposed_solution is not None: # For mypy only + assert decomposed_solution.is_close( + solution + ), f"Solution differs from expected: {decomposed_solution}" diff --git a/tests/models/conftest.py b/tests/models/conftest.py new file mode 100644 index 00000000..ee330ed3 --- /dev/null +++ b/tests/models/conftest.py @@ -0,0 +1,50 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+from pathlib import Path + +import pytest + +from andromede.model.parsing import parse_yaml_library +from andromede.model.resolve_library import resolve_library + + +@pytest.fixture(scope="session") +def libs_dir() -> Path: + return Path(__file__).parent / "libs" + + +@pytest.fixture(scope="session") +def data_dir() -> Path: + return Path(__file__).parents[1] / "unittests/data" + + +@pytest.fixture(scope="session") +def lib(data_dir: Path): + lib_file = data_dir / "lib.yml" + + with lib_file.open() as f: + input_lib = parse_yaml_library(f) + + lib = resolve_library(input_lib) + return lib + + +@pytest.fixture(scope="session") +def lib_sc(): + libs_path = Path(__file__).parents[2] / "src/andromede/libs/" + lib_sc_file = libs_path / "standard_sc.yml" + + with lib_sc_file.open() as f: + input_lib_sc = parse_yaml_library(f) + + lib_sc = resolve_library(input_lib_sc) + return lib_sc diff --git a/tests/models/libs/ac.yml b/tests/models/libs/ac.yml new file mode 100644 index 00000000..5a0bbdb8 --- /dev/null +++ b/tests/models/libs/ac.yml @@ -0,0 +1,149 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +library: + id: ac + description: | + AC network models + + Contains a few models useful for representing AC network, + through the DC approximation: + * An AC node model + * An AC link model + * An AC link model with a flow limit + * An AC link model with a PST + + port-types: + - id: ac-port + description: A port which transfers power flow and voltage angle + fields: + - name: flow + - name: angle + + models: + + # Node has 2 ports: one for angle dependent connections, + # one for power-only connections. 
+ # Should we relax constraints on ports compatibility to allow to have only one here? + - id: ac-node + variables: + - name: angle + ports: + - name: injections + type: balance + - name: links + type: ac-port + port-field-definitions: + - port: links + field: angle + definition: angle + binding-constraints: + - name: Balance + expression: sum_connections(injections.flow) + sum_connections(links.flow) = 0 + + # Flow on the line is proportional to angle difference between extremities, + # and inverse of impedance. + - id: ac-link + description: A basic AC link model + parameters: + - name: reactance + time-dependent: false + scenario-dependent: false + variables: + - name: flow + ports: + - name: port1 + type: ac-port + - name: port2 + type: ac-port + port-field-definitions: + - port: port1 + field: flow + definition: -flow + - port: port2 + field: flow + definition: flow + binding-constraints: + - name: AC flow + expression: flow = 1 / reactance * (port1.angle - port2.angle) + + # Flow on the line is proportional to angle difference between extremities, + # and inverse of impedance. + # Flow value is restricted by the parameter flow_limit. + - id: ac-link-with-limit + description: A basic AC link model with a flow limit + parameters: + - name: reactance + time-dependent: false + scenario-dependent: false + - name: flow_limit + time-dependent: false + scenario-dependent: false + variables: + - name: flow + lower-bound: -flow_limit + upper-bound: flow_limit + ports: + - name: port1 + type: ac-port + - name: port2 + type: ac-port + port-field-definitions: + - port: port1 + field: flow + definition: -flow + - port: port2 + field: flow + definition: flow + binding-constraints: + - name: AC flow + expression: flow = 1 / reactance * (port1.angle - port2.angle) + + # Flow on the line is proportional to angle difference between extremities, + # and inverse of impedance. + # A shift angle is applied on side 1, allowing to control the flow. 
+ # We associate a cost to the absolute value of phase shift, + # which may be useful to remove equivalent solutions. + - id: ac-link-with-pst + description: An AC link with a phase shifter + parameters: + - name: reactance + time-dependent: false + scenario-dependent: false + - name: phase_shift_cost + time-dependent: false + scenario-dependent: false + variables: + - name: flow + - name: phase_shift + - name: phase_shift_pos + lower-bound: 0 + - name: phase_shift_neg + lower-bound: 0 + ports: + - name: port1 + type: ac-port + - name: port2 + type: ac-port + port-field-definitions: + - port: port1 + field: flow + definition: -flow + - port: port2 + field: flow + definition: flow + constraints: + - name: Phase shift + expression: phase_shift = phase_shift_pos - phase_shift_neg + binding-constraints: + - name: AC flow + expression: flow = 1 / reactance * (port1.angle + phase_shift - port2.angle) + objective: expec(sum(phase_shift_cost * (phase_shift_pos + phase_shift_neg))) diff --git a/tests/models/test_ac_link.py b/tests/models/test_ac_link.py new file mode 100644 index 00000000..b94bab02 --- /dev/null +++ b/tests/models/test_ac_link.py @@ -0,0 +1,300 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+from pathlib import Path + +import pytest + +from andromede.libs.standard import BALANCE_PORT_TYPE, DEMAND_MODEL, GENERATOR_MODEL +from andromede.model.library import Library, library +from andromede.model.parsing import parse_yaml_library +from andromede.model.resolve_library import resolve_library +from andromede.simulation import OutputValues, TimeBlock, build_problem +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + create_component, +) + + +@pytest.fixture +def std_lib() -> Library: + return library( + port_types=[BALANCE_PORT_TYPE], models=[GENERATOR_MODEL, DEMAND_MODEL] + ) + + +@pytest.fixture +def ac_lib(libs_dir: Path, std_lib: Library) -> Library: + lib_file = libs_dir / "ac.yml" + with lib_file.open() as f: + input_lib = parse_yaml_library(f) + return resolve_library(input_lib, preloaded_libraries=[std_lib]) + + +def test_ac_network_no_links(ac_lib: Library): + """ + The network only has one AC node where a generator and a demand are connected. 
+ + There is actually no AC link connected to it, we just check that + generation matches demand on this node: + - demand = 100 + - cost = 30 + --> objective = 30 * 100 = 3000 + """ + ac_node_model = ac_lib.models["ac-node"] + + database = DataBase() + database.add_data("D", "demand", ConstantData(100)) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(30)) + + node = Node(model=ac_node_model, id="N") + demand = create_component( + model=DEMAND_MODEL, + id="D", + ) + + gen = create_component( + model=GENERATOR_MODEL, + id="G", + ) + + network = Network("test") + network.add_node(node) + network.add_component(demand) + network.add_component(gen) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "injections")) + network.connect(PortRef(gen, "balance_port"), PortRef(node, "injections")) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == pytest.approx(3000, abs=0.01) + + +def test_ac_network(ac_lib: Library): + """ + The network only has 2 AC nodes connected by 1 AC link. + + Node 1 carries the demand of 100 MW, + node 2 carries the generator with a cost of 35 per MWh. + + We check that final cost matches the demand: 100 * 35 = 3500, + and that flow on the line is -100 MW. 
+ """ + ac_node_model = ac_lib.models["ac-node"] + ac_link_model = ac_lib.models["ac-link"] + + database = DataBase() + database.add_data("D", "demand", ConstantData(100)) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(35)) + + database.add_data("L", "reactance", ConstantData(1)) + + node1 = Node(model=ac_node_model, id="1") + node2 = Node(model=ac_node_model, id="2") + demand = create_component( + model=DEMAND_MODEL, + id="D", + ) + + gen = create_component( + model=GENERATOR_MODEL, + id="G", + ) + + link = create_component( + model=ac_link_model, + id="L", + ) + + network = Network("test") + network.add_node(node1) + network.add_node(node2) + network.add_component(demand) + network.add_component(gen) + network.add_component(link) + network.connect(PortRef(demand, "balance_port"), PortRef(node1, "injections")) + network.connect(PortRef(gen, "balance_port"), PortRef(node2, "injections")) + network.connect(PortRef(link, "port1"), PortRef(node1, "links")) + network.connect(PortRef(link, "port2"), PortRef(node2, "links")) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == pytest.approx(3500, abs=0.01) + + assert OutputValues(problem).component("L").var("flow").value == pytest.approx( + -100, abs=0.01 + ) + + +def test_parallel_ac_links(ac_lib: Library): + """ + The network has 2 AC nodes connected by 2 parallel links, + where reactance is 1 for line L1, and 2 for line L2. + We expect flow to be te twice bigger on L1 than on L2. + + Node 1 carries the demand of 100 MW, + node 2 carries the generator with a cost of 35 per MWh. + + We check that final cost matches the demand: 100 * 35 = 3500, + and that flow on L1 is -66. MW while flow on L2 is only -33.3 MW. 
+ """ + ac_node_model = ac_lib.models["ac-node"] + ac_link_model = ac_lib.models["ac-link"] + + database = DataBase() + database.add_data("D", "demand", ConstantData(100)) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(35)) + + database.add_data("L1", "reactance", ConstantData(1)) + database.add_data("L2", "reactance", ConstantData(2)) + + node1 = Node(model=ac_node_model, id="1") + node2 = Node(model=ac_node_model, id="2") + demand = create_component( + model=DEMAND_MODEL, + id="D", + ) + gen = create_component( + model=GENERATOR_MODEL, + id="G", + ) + link1 = create_component( + model=ac_link_model, + id="L1", + ) + link2 = create_component( + model=ac_link_model, + id="L2", + ) + + network = Network("test") + network.add_node(node1) + network.add_node(node2) + network.add_component(demand) + network.add_component(gen) + network.add_component(link1) + network.add_component(link2) + network.connect(PortRef(demand, "balance_port"), PortRef(node1, "injections")) + network.connect(PortRef(gen, "balance_port"), PortRef(node2, "injections")) + network.connect(PortRef(link1, "port1"), PortRef(node1, "links")) + network.connect(PortRef(link1, "port2"), PortRef(node2, "links")) + network.connect(PortRef(link2, "port1"), PortRef(node1, "links")) + network.connect(PortRef(link2, "port2"), PortRef(node2, "links")) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == pytest.approx(3500, abs=0.01) + + assert OutputValues(problem).component("L1").var("flow").value == pytest.approx( + -66.67, abs=0.01 + ) + assert OutputValues(problem).component("L2").var("flow").value == pytest.approx( + -33.33, abs=0.01 + ) + + +def test_parallel_ac_links_with_pst(ac_lib: Library): + """ + Same case as in parallel_ac_links but: + - flow is restricted to 50 MW on line L1, so it 
cannot + anymore transfer 66,7MW + - flow can be influenced by a phase shifter on line L2 + + We expect the case to be feasible thanks to the phase shifter, + which will allow to balance the flow between the 2 lines. + Therefore we expect flows to be 50 MW on both lines. + + Objective value is 3500 (for generation) + 50 (for phase shift). + """ + ac_node_model = ac_lib.models["ac-node"] + ac_link_model = ac_lib.models["ac-link-with-limit"] + pst_model = ac_lib.models["ac-link-with-pst"] + + database = DataBase() + database.add_data("D", "demand", ConstantData(100)) + + database.add_data("G", "p_max", ConstantData(100)) + database.add_data("G", "cost", ConstantData(35)) + + database.add_data("L", "reactance", ConstantData(1)) + database.add_data("L", "flow_limit", ConstantData(50)) + database.add_data("T", "reactance", ConstantData(2)) + database.add_data("T", "flow_limit", ConstantData(50)) + database.add_data("T", "phase_shift_cost", ConstantData(1)) + + node1 = Node(model=ac_node_model, id="1") + node2 = Node(model=ac_node_model, id="2") + demand = create_component( + model=DEMAND_MODEL, + id="D", + ) + gen = create_component( + model=GENERATOR_MODEL, + id="G", + ) + link1 = create_component( + model=ac_link_model, + id="L", + ) + link2 = create_component( + model=pst_model, + id="T", + ) + + network = Network("test") + network.add_node(node1) + network.add_node(node2) + network.add_component(demand) + network.add_component(gen) + network.add_component(link1) + network.add_component(link2) + network.connect(PortRef(demand, "balance_port"), PortRef(node1, "injections")) + network.connect(PortRef(gen, "balance_port"), PortRef(node2, "injections")) + network.connect(PortRef(link1, "port1"), PortRef(node1, "links")) + network.connect(PortRef(link1, "port2"), PortRef(node2, "links")) + network.connect(PortRef(link2, "port1"), PortRef(node1, "links")) + network.connect(PortRef(link2, "port2"), PortRef(node2, "links")) + + scenarios = 1 + problem = 
build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == pytest.approx(3550, abs=0.01) + + assert OutputValues(problem).component("L").var("flow").value == pytest.approx( + -50, abs=0.01 + ) + assert OutputValues(problem).component("T").var("flow").value == pytest.approx( + -50, abs=0.01 + ) + assert OutputValues(problem).component("T").var( + "phase_shift" + ).value == pytest.approx(-50, abs=0.01) diff --git a/tests/models/test_electrolyzer_n_inputs.py b/tests/models/test_electrolyzer_n_inputs.py new file mode 100644 index 00000000..6c6f5be5 --- /dev/null +++ b/tests/models/test_electrolyzer_n_inputs.py @@ -0,0 +1,413 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+ +import math + +from andromede.libs.standard import DEMAND_MODEL, GENERATOR_MODEL, NODE_BALANCE_MODEL +from andromede.libs.standard_sc import ( + CONVERTOR_MODEL, + CONVERTOR_RECEIVE_IN, + DECOMPOSE_1_FLOW_INTO_2_FLOW, + NODE_BALANCE_MODEL_MOD, + TWO_INPUTS_CONVERTOR_MODEL, +) +from andromede.simulation import OutputValues, TimeBlock, build_problem +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + create_component, +) + +""" +for every following test we have two electrical productions with an electrolyzer converting to a gaz flow +we always have: + first electric production: + - p_max = 70 + - cost = 10 + second electric production: + - p_max = 80 + - cost = 20 + gaz production: + - p_max = 30 + - cost = 15 + first conversion rate: + - alpha = 0.7 + second conversion rate: + - alpha = 0.5 + for a gaz demand of 100 +""" + + +def test_electrolyzer_n_inputs_1(): + """ + Test with an electrolyzer for each input + + ep1 = electric production 1 + ep2 = electric production 2 + ez1 = electrolyzer 1 + ez2 = electrolyzer 2 + gp = gaz production + + total gaz production = flow_ep1 * alpha_ez1 + flow_ep2 * alpha_ez2 + flow_gp + + """ + elec_node_1 = Node(model=NODE_BALANCE_MODEL, id="e1") + electric_prod_1 = create_component(model=GENERATOR_MODEL, id="ep1") + electrolyzer1 = create_component(model=CONVERTOR_MODEL, id="ez1") + + elec_node_2 = Node(model=NODE_BALANCE_MODEL, id="e2") + electric_prod_2 = create_component(model=GENERATOR_MODEL, id="ep2") + electrolyzer2 = create_component(model=CONVERTOR_MODEL, id="ez2") + + gaz_node = Node(model=NODE_BALANCE_MODEL, id="g") + gaz_prod = create_component(model=GENERATOR_MODEL, id="gp") + gaz_demand = create_component(model=DEMAND_MODEL, id="gd") + + database = DataBase() + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + database.add_data("ez1", "alpha", ConstantData(0.7)) + + database.add_data("ep2", "p_max", ConstantData(80)) + 
database.add_data("ep2", "cost", ConstantData(20)) + database.add_data("ez2", "alpha", ConstantData(0.5)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_component(electric_prod_1) + network.add_component(electrolyzer1) + network.add_node(elec_node_2) + network.add_component(electric_prod_2) + network.add_component(electrolyzer2) + network.add_node(gaz_node) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + + network.connect( + PortRef(electric_prod_1, "balance_port"), PortRef(elec_node_1, "balance_port") + ) + network.connect( + PortRef(elec_node_1, "balance_port"), PortRef(electrolyzer1, "FlowDI") + ) + network.connect(PortRef(electrolyzer1, "FlowDO"), PortRef(gaz_node, "balance_port")) + network.connect( + PortRef(electric_prod_2, "balance_port"), PortRef(elec_node_2, "balance_port") + ) + network.connect( + PortRef(elec_node_2, "balance_port"), PortRef(electrolyzer2, "FlowDI") + ) + network.connect(PortRef(electrolyzer2, "FlowDO"), PortRef(gaz_node, "balance_port")) + network.connect( + PortRef(gaz_node, "balance_port"), PortRef(gaz_demand, "balance_port") + ) + network.connect( + PortRef(gaz_prod, "balance_port"), PortRef(gaz_node, "balance_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + print(ep1_gen) + print(ep2_gen) + print(gp_gen) + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 42) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1990) + + +def 
test_electrolyzer_n_inputs_2(): + """ + Test with one electrolyzer that has two inputs + + ep1 = electric production 1 + ep2 = electric production 2 + ez = electrolyzer + gp = gaz production + + total gaz production = flow_ep1 * alpha1_ez + flow_ep2 * alpha2_ez + flow_gp + """ + + elec_node_1 = Node(model=NODE_BALANCE_MODEL, id="e1") + elec_node_2 = Node(model=NODE_BALANCE_MODEL, id="e2") + gaz_node = Node(model=NODE_BALANCE_MODEL, id="g") + + electric_prod_1 = create_component(model=GENERATOR_MODEL, id="ep1") + electric_prod_2 = create_component(model=GENERATOR_MODEL, id="ep2") + + gaz_prod = create_component(model=GENERATOR_MODEL, id="gp") + gaz_demand = create_component(model=DEMAND_MODEL, id="gd") + + electrolyzer = create_component(model=TWO_INPUTS_CONVERTOR_MODEL, id="ez") + + database = DataBase() + + database.add_data("ez", "alpha1", ConstantData(0.7)) + database.add_data("ez", "alpha2", ConstantData(0.5)) + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_node(elec_node_2) + network.add_node(gaz_node) + network.add_component(electric_prod_1) + network.add_component(electric_prod_2) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + network.add_component(electrolyzer) + + network.connect( + PortRef(electric_prod_1, "balance_port"), PortRef(elec_node_1, "balance_port") + ) + network.connect( + PortRef(elec_node_1, "balance_port"), PortRef(electrolyzer, "FlowDI1") + ) + network.connect( + PortRef(electric_prod_2, "balance_port"), PortRef(elec_node_2, "balance_port") + ) + network.connect( + PortRef(elec_node_2, "balance_port"), 
PortRef(electrolyzer, "FlowDI2") + ) + network.connect(PortRef(electrolyzer, "FlowDO"), PortRef(gaz_node, "balance_port")) + network.connect( + PortRef(gaz_node, "balance_port"), PortRef(gaz_demand, "balance_port") + ) + network.connect( + PortRef(gaz_prod, "balance_port"), PortRef(gaz_node, "balance_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + print(ep1_gen) + print(ep2_gen) + print(gp_gen) + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 42) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1990) + + +def test_electrolyzer_n_inputs_3(): + """ + Test with a consumption_electrolyzer with two inputs + + ep1 = electric production 1 + ep2 = electric production 2 + ez = electrolyzer + gp = gaz production + + total gaz production = (flow_ep1 + flow_ep2) * alpha_ez + flow_gp + + The result is different since we only have one alpha at 0.7 + """ + elec_node_1 = Node(model=NODE_BALANCE_MODEL, id="e1") + elec_node_2 = Node(model=NODE_BALANCE_MODEL, id="e2") + gaz_node = Node(model=NODE_BALANCE_MODEL, id="g") + + electric_prod_1 = create_component(model=GENERATOR_MODEL, id="ep1") + electric_prod_2 = create_component(model=GENERATOR_MODEL, id="ep2") + + gaz_prod = create_component(model=GENERATOR_MODEL, id="gp") + gaz_demand = create_component(model=DEMAND_MODEL, id="gd") + + electrolyzer = create_component(model=CONVERTOR_MODEL, id="ez") + consumption_electrolyzer = create_component( + model=DECOMPOSE_1_FLOW_INTO_2_FLOW, id="ce" + ) + + database = DataBase() + + database.add_data("ez", "alpha", ConstantData(0.7)) + + database.add_data("ep1", "p_max", ConstantData(70)) + 
database.add_data("ep1", "cost", ConstantData(10)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_node(elec_node_2) + network.add_node(gaz_node) + network.add_component(electric_prod_1) + network.add_component(electric_prod_2) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + network.add_component(electrolyzer) + network.add_component(consumption_electrolyzer) + + network.connect( + PortRef(electric_prod_1, "balance_port"), PortRef(elec_node_1, "balance_port") + ) + network.connect( + PortRef(elec_node_1, "balance_port"), + PortRef(consumption_electrolyzer, "FlowDI1"), + ) + network.connect( + PortRef(electric_prod_2, "balance_port"), PortRef(elec_node_2, "balance_port") + ) + network.connect( + PortRef(elec_node_2, "balance_port"), + PortRef(consumption_electrolyzer, "FlowDI2"), + ) + network.connect( + PortRef(consumption_electrolyzer, "FlowDO"), PortRef(electrolyzer, "FlowDI") + ) + network.connect(PortRef(electrolyzer, "FlowDO"), PortRef(gaz_node, "balance_port")) + network.connect( + PortRef(gaz_node, "balance_port"), PortRef(gaz_demand, "balance_port") + ) + network.connect( + PortRef(gaz_prod, "balance_port"), PortRef(gaz_node, "balance_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 30) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert 
math.isclose(problem.solver.Objective().Value(), 1750) + + +def test_electrolyzer_n_inputs_4(): + """ + Test with one electrolyzer with one input that takes every inputs + + ep1 = electric production 1 + ep2 = electric production 2 + ez = electrolyzer + gp = gaz production + + total gaz production = (flow_ep1 + flow_ep2) * alpha_ez + flow_gp + + same as test 3, the result is different than the first two since we only have one alpha at 0.7 + """ + elec_node_1 = Node(model=NODE_BALANCE_MODEL_MOD, id="e1") + elec_node_2 = Node(model=NODE_BALANCE_MODEL_MOD, id="e2") + gaz_node = Node(model=NODE_BALANCE_MODEL, id="g") + + electric_prod_1 = create_component(model=GENERATOR_MODEL, id="ep1") + electric_prod_2 = create_component(model=GENERATOR_MODEL, id="ep2") + + gaz_prod = create_component(model=GENERATOR_MODEL, id="gp") + gaz_demand = create_component(model=DEMAND_MODEL, id="gd") + + electrolyzer = create_component(model=CONVERTOR_RECEIVE_IN, id="ez") + + database = DataBase() + + database.add_data("ez", "alpha", ConstantData(0.7)) + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_node(elec_node_2) + network.add_node(gaz_node) + network.add_component(electric_prod_1) + network.add_component(electric_prod_2) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + network.add_component(electrolyzer) + + network.connect( + PortRef(electric_prod_1, "balance_port"), PortRef(elec_node_1, "balance_port_n") + ) + network.connect( + PortRef(elec_node_1, "balance_port_e"), PortRef(electrolyzer, "FlowDI") + ) + network.connect( + PortRef(electric_prod_2, 
"balance_port"), PortRef(elec_node_2, "balance_port_n") + ) + network.connect( + PortRef(elec_node_2, "balance_port_e"), PortRef(electrolyzer, "FlowDI") + ) + network.connect(PortRef(electrolyzer, "FlowDO"), PortRef(gaz_node, "balance_port")) + network.connect( + PortRef(gaz_node, "balance_port"), PortRef(gaz_demand, "balance_port") + ) + network.connect( + PortRef(gaz_prod, "balance_port"), PortRef(gaz_node, "balance_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 30) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1750) diff --git a/tests/models/test_electrolyzer_n_inputs_yaml.py b/tests/models/test_electrolyzer_n_inputs_yaml.py new file mode 100644 index 00000000..ac6e8a25 --- /dev/null +++ b/tests/models/test_electrolyzer_n_inputs_yaml.py @@ -0,0 +1,448 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+ +import math +from pathlib import Path + +from andromede.simulation import OutputValues, TimeBlock, build_problem +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + create_component, +) + +""" +for every following test we have two electrical productions with an electrolyzer converting to a gaz flow +we always have: + first electric production: + - p_max = 70 + - cost = 10 + second electric production: + - p_max = 80 + - cost = 20 + gaz production: + - p_max = 30 + - cost = 15 + first conversion rate: + - alpha = 0.7 + second conversion rate: + - alpha = 0.5 + for a gaz demand of 100 +""" + + +def test_electrolyzer_n_inputs_1(data_dir: Path, lib: Path, lib_sc: Path): + """ + Test with an electrolyzer for each input + + ep1 = electric production 1 + ep2 = electric production 2 + ez1 = electrolyzer 1 + ez2 = electrolyzer 2 + gp = gaz production + + total gaz production = flow_ep1 * alpha_ez1 + flow_ep2 * alpha_ez2 + flow_gp + + """ + + gen_model = lib.models["generator"] + node_model = lib.models["node"] + convertor_model = lib_sc.models["convertor"] + demand_model = lib.models["demand"] + + elec_node_1 = Node(model=node_model, id="e1") + electric_prod_1 = create_component(model=gen_model, id="ep1") + electrolyzer1 = create_component(model=convertor_model, id="ez1") + + elec_node_2 = Node(model=node_model, id="e2") + electric_prod_2 = create_component(model=gen_model, id="ep2") + electrolyzer2 = create_component(model=convertor_model, id="ez2") + + gaz_node = Node(model=node_model, id="g") + gaz_prod = create_component(model=gen_model, id="gp") + gaz_demand = create_component(model=demand_model, id="gd") + + database = DataBase() + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + database.add_data("ez1", "alpha", ConstantData(0.7)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + database.add_data("ez2", 
"alpha", ConstantData(0.5)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_component(electric_prod_1) + network.add_component(electrolyzer1) + network.add_node(elec_node_2) + network.add_component(electric_prod_2) + network.add_component(electrolyzer2) + network.add_node(gaz_node) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + + network.connect( + PortRef(electric_prod_1, "injection_port"), + PortRef(elec_node_1, "injection_port"), + ) + network.connect( + PortRef(elec_node_1, "injection_port"), PortRef(electrolyzer1, "input_port") + ) + network.connect( + PortRef(electrolyzer1, "output_port"), PortRef(gaz_node, "injection_port") + ) + network.connect( + PortRef(electric_prod_2, "injection_port"), + PortRef(elec_node_2, "injection_port"), + ) + network.connect( + PortRef(elec_node_2, "injection_port"), PortRef(electrolyzer2, "input_port") + ) + network.connect( + PortRef(electrolyzer2, "output_port"), PortRef(gaz_node, "injection_port") + ) + network.connect( + PortRef(gaz_node, "injection_port"), PortRef(gaz_demand, "injection_port") + ) + network.connect( + PortRef(gaz_prod, "injection_port"), PortRef(gaz_node, "injection_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + print(ep1_gen) + print(ep2_gen) + print(gp_gen) + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 42) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1990) + + +def 
test_electrolyzer_n_inputs_2(data_dir: Path, lib: Path, lib_sc: Path): + """ + Test with one electrolyzer that has two inputs + + ep1 = electric production 1 + ep2 = electric production 2 + ez = electrolyzer + gp = gaz production + + total gaz production = flow_ep1 * alpha1_ez + flow_ep2 * alpha2_ez + flow_gp + """ + + gen_model = lib.models["generator"] + node_model = lib.models["node"] + convertor_model = lib_sc.models["two_input_convertor"] + demand_model = lib.models["demand"] + + elec_node_1 = Node(model=node_model, id="e1") + elec_node_2 = Node(model=node_model, id="e2") + gaz_node = Node(model=node_model, id="g") + + electric_prod_1 = create_component(model=gen_model, id="ep1") + electric_prod_2 = create_component(model=gen_model, id="ep2") + + gaz_prod = create_component(model=gen_model, id="gp") + gaz_demand = create_component(model=demand_model, id="gd") + + electrolyzer = create_component(model=convertor_model, id="ez") + + database = DataBase() + + database.add_data("ez", "alpha1", ConstantData(0.7)) + database.add_data("ez", "alpha2", ConstantData(0.5)) + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_node(elec_node_2) + network.add_node(gaz_node) + network.add_component(electric_prod_1) + network.add_component(electric_prod_2) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + network.add_component(electrolyzer) + + network.connect( + PortRef(electric_prod_1, "injection_port"), + PortRef(elec_node_1, "injection_port"), + ) + network.connect( + PortRef(elec_node_1, "injection_port"), PortRef(electrolyzer, "input_port1") + ) + 
network.connect( + PortRef(electric_prod_2, "injection_port"), + PortRef(elec_node_2, "injection_port"), + ) + network.connect( + PortRef(elec_node_2, "injection_port"), PortRef(electrolyzer, "input_port2") + ) + network.connect( + PortRef(electrolyzer, "output_port"), PortRef(gaz_node, "injection_port") + ) + network.connect( + PortRef(gaz_node, "injection_port"), PortRef(gaz_demand, "injection_port") + ) + network.connect( + PortRef(gaz_prod, "injection_port"), PortRef(gaz_node, "injection_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + print(ep1_gen) + print(ep2_gen) + print(gp_gen) + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 42) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1990) + + +def test_electrolyzer_n_inputs_3(data_dir: Path, lib: Path, lib_sc: Path): + """ + Test with a consumption_electrolyzer with two inputs + + ep1 = electric production 1 + ep2 = electric production 2 + ez = electrolyzer + gp = gaz production + + total gaz production = (flow_ep1 + flow_ep2) * alpha_ez + flow_gp + + The result is different since we only have one alpha at 0.7 + """ + + gen_model = lib.models["generator"] + node_model = lib.models["node"] + convertor_model = lib_sc.models["convertor"] + demand_model = lib.models["demand"] + decompose_flow_model = lib_sc.models["decompose_1_flow_into_2_flow"] + + elec_node_1 = Node(model=node_model, id="e1") + elec_node_2 = Node(model=node_model, id="e2") + gaz_node = Node(model=node_model, id="g") + + electric_prod_1 = create_component(model=gen_model, id="ep1") + electric_prod_2 = create_component(model=gen_model, id="ep2") 
+ + gaz_prod = create_component(model=gen_model, id="gp") + gaz_demand = create_component(model=demand_model, id="gd") + + electrolyzer = create_component(model=convertor_model, id="ez") + consumption_electrolyzer = create_component(model=decompose_flow_model, id="ce") + + database = DataBase() + + database.add_data("ez", "alpha", ConstantData(0.7)) + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + + database.add_data("gd", "demand", ConstantData(100)) + database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_node(elec_node_2) + network.add_node(gaz_node) + network.add_component(electric_prod_1) + network.add_component(electric_prod_2) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + network.add_component(electrolyzer) + network.add_component(consumption_electrolyzer) + + network.connect( + PortRef(electric_prod_1, "injection_port"), + PortRef(elec_node_1, "injection_port"), + ) + network.connect( + PortRef(elec_node_1, "injection_port"), + PortRef(consumption_electrolyzer, "input_port1"), + ) + network.connect( + PortRef(electric_prod_2, "injection_port"), + PortRef(elec_node_2, "injection_port"), + ) + network.connect( + PortRef(elec_node_2, "injection_port"), + PortRef(consumption_electrolyzer, "input_port2"), + ) + network.connect( + PortRef(consumption_electrolyzer, "output_port"), + PortRef(electrolyzer, "input_port"), + ) + network.connect( + PortRef(electrolyzer, "output_port"), PortRef(gaz_node, "injection_port") + ) + network.connect( + PortRef(gaz_node, "injection_port"), PortRef(gaz_demand, "injection_port") + ) + network.connect( + PortRef(gaz_prod, "injection_port"), PortRef(gaz_node, "injection_port") + ) + + scenarios = 1 + problem = 
build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 30) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1750) + + +def test_electrolyzer_n_inputs_4(data_dir: Path, lib: Path, lib_sc: Path): + """ + Test with one electrolyzer with one input that takes every inputs + + ep1 = electric production 1 + ep2 = electric production 2 + ez = electrolyzer + gp = gaz production + + total gaz production = (flow_ep1 + flow_ep2) * alpha_ez + flow_gp + + same as test 3, the result is different than the first two since we only have one alpha at 0.7 + """ + + gen_model = lib.models["generator"] + node_model = lib.models["node"] + node_mod_model = lib_sc.models["node_mod"] + convertor_model = lib_sc.models["convertor_receive_in"] + demand_model = lib.models["demand"] + + elec_node_1 = Node(model=node_mod_model, id="e1") + elec_node_2 = Node(model=node_mod_model, id="e2") + gaz_node = Node(model=node_model, id="g") + + electric_prod_1 = create_component(model=gen_model, id="ep1") + electric_prod_2 = create_component(model=gen_model, id="ep2") + + gaz_prod = create_component(model=gen_model, id="gp") + gaz_demand = create_component(model=demand_model, id="gd") + + electrolyzer = create_component(model=convertor_model, id="ez") + + database = DataBase() + + database.add_data("ez", "alpha", ConstantData(0.7)) + + database.add_data("ep1", "p_max", ConstantData(70)) + database.add_data("ep1", "cost", ConstantData(10)) + + database.add_data("ep2", "p_max", ConstantData(80)) + database.add_data("ep2", "cost", ConstantData(20)) + + database.add_data("gd", "demand", ConstantData(100)) + 
database.add_data("gp", "p_max", ConstantData(30)) + database.add_data("gp", "cost", ConstantData(15)) + + network = Network("test") + network.add_node(elec_node_1) + network.add_node(elec_node_2) + network.add_node(gaz_node) + network.add_component(electric_prod_1) + network.add_component(electric_prod_2) + network.add_component(gaz_prod) + network.add_component(gaz_demand) + network.add_component(electrolyzer) + + network.connect( + PortRef(electric_prod_1, "injection_port"), + PortRef(elec_node_1, "injection_port_n"), + ) + network.connect( + PortRef(elec_node_1, "injection_port_e"), PortRef(electrolyzer, "input_port") + ) + network.connect( + PortRef(electric_prod_2, "injection_port"), + PortRef(elec_node_2, "injection_port_n"), + ) + network.connect( + PortRef(elec_node_2, "injection_port_e"), PortRef(electrolyzer, "input_port") + ) + network.connect( + PortRef(electrolyzer, "output_port"), PortRef(gaz_node, "injection_port") + ) + network.connect( + PortRef(gaz_node, "injection_port"), PortRef(gaz_demand, "injection_port") + ) + network.connect( + PortRef(gaz_prod, "injection_port"), PortRef(gaz_node, "injection_port") + ) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + + output = OutputValues(problem) + ep1_gen = output.component("ep1").var("generation").value + ep2_gen = output.component("ep2").var("generation").value + gp_gen = output.component("gp").var("generation").value + + assert math.isclose(ep1_gen, 70) + assert math.isclose(ep2_gen, 30) + assert math.isclose(gp_gen, 30) + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 1750) diff --git a/tests/models/test_quota_co2.py b/tests/models/test_quota_co2.py new file mode 100644 index 00000000..07f50e58 --- /dev/null +++ b/tests/models/test_quota_co2.py @@ -0,0 +1,94 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# 
See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. + +import math + +from andromede.libs.standard import DEMAND_MODEL, LINK_MODEL, NODE_BALANCE_MODEL +from andromede.libs.standard_sc import C02_POWER_MODEL, QUOTA_CO2_MODEL +from andromede.simulation import OutputValues, TimeBlock, build_problem +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + create_component, +) + +""" +build the quota CO² test system. + + N1 -----N2----Demand ^ + | | + Oil1 Coal1 + | | + --------- + | + QuotaCO2 + +""" +""" Test of a generation of energy and co2 with a quota to limit the emission""" + + +def test_quota_co2(): + n1 = Node(model=NODE_BALANCE_MODEL, id="N1") + n2 = Node(model=NODE_BALANCE_MODEL, id="N2") + oil1 = create_component(model=C02_POWER_MODEL, id="Oil1") + coal1 = create_component(model=C02_POWER_MODEL, id="Coal1") + l12 = create_component(model=LINK_MODEL, id="L12") + demand = create_component(model=DEMAND_MODEL, id="Demand") + monQuotaCO2 = create_component(model=QUOTA_CO2_MODEL, id="QuotaCO2") + + network = Network("test") + network.add_node(n1) + network.add_node(n2) + network.add_component(oil1) + network.add_component(coal1) + network.add_component(l12) + network.add_component(demand) + network.add_component(monQuotaCO2) + + network.connect(PortRef(demand, "balance_port"), PortRef(n2, "balance_port")) + network.connect(PortRef(n2, "balance_port"), PortRef(l12, "balance_port_from")) + network.connect(PortRef(l12, "balance_port_to"), PortRef(n1, "balance_port")) + network.connect(PortRef(n1, "balance_port"), PortRef(oil1, "FlowP")) + network.connect(PortRef(n2, "balance_port"), PortRef(coal1, "FlowP")) + network.connect(PortRef(oil1, "OutCO2"), PortRef(monQuotaCO2, 
"emissionCO2")) + network.connect(PortRef(coal1, "OutCO2"), PortRef(monQuotaCO2, "emissionCO2")) + + database = DataBase() + database.add_data("Demand", "demand", ConstantData(100)) + database.add_data("Coal1", "p_min", ConstantData(0)) + database.add_data("Oil1", "p_min", ConstantData(0)) + database.add_data("Coal1", "p_max", ConstantData(100)) + database.add_data("Oil1", "p_max", ConstantData(100)) + database.add_data("Coal1", "emission_rate", ConstantData(2)) + database.add_data("Oil1", "emission_rate", ConstantData(1)) + database.add_data("Coal1", "cost", ConstantData(10)) + database.add_data("Oil1", "cost", ConstantData(100)) + database.add_data("L12", "f_max", ConstantData(100)) + database.add_data("QuotaCO2", "quota", ConstantData(150)) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + oil1_p = output.component("Oil1").var("p").value + coal1_p = output.component("Coal1").var("p").value + l12_flow = output.component("L12").var("flow").value + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 5500) + assert math.isclose(oil1_p, 50) + assert math.isclose(coal1_p, 50) + assert math.isclose(l12_flow, -50) diff --git a/tests/models/test_quota_co2_yaml.py b/tests/models/test_quota_co2_yaml.py new file mode 100644 index 00000000..f11a2369 --- /dev/null +++ b/tests/models/test_quota_co2_yaml.py @@ -0,0 +1,99 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+ +import math +from pathlib import Path + +from andromede.simulation import OutputValues, TimeBlock, build_problem +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + create_component, +) + +""" +build the quota CO² test system. + + N1 -----N2----Demand + | | + Oil1 Coal1 + | | + --------- + | + QuotaCO2 + +""" +""" Test of a generation of energy and co2 with a quota to limit the emission""" + + +def test_quota_co2(data_dir: Path, lib: Path, lib_sc: Path): + gen_model = lib_sc.models["generator_with_co2"] + node_model = lib.models["node"] + quota_co2_model = lib_sc.models["quota_co2"] + demand_model = lib.models["demand"] + link_model = lib_sc.models["link"] + + n1 = Node(model=node_model, id="N1") + n2 = Node(model=node_model, id="N2") + oil1 = create_component(model=gen_model, id="Oil1") + coal1 = create_component(model=gen_model, id="Coal1") + l12 = create_component(model=link_model, id="L12") + demand = create_component(model=demand_model, id="Demand") + monQuotaCO2 = create_component(model=quota_co2_model, id="QuotaCO2") + + network = Network("test") + network.add_node(n1) + network.add_node(n2) + network.add_component(oil1) + network.add_component(coal1) + network.add_component(l12) + network.add_component(demand) + network.add_component(monQuotaCO2) + + network.connect(PortRef(demand, "injection_port"), PortRef(n2, "injection_port")) + network.connect(PortRef(n2, "injection_port"), PortRef(l12, "injection_port_from")) + network.connect(PortRef(l12, "injection_port_to"), PortRef(n1, "injection_port")) + network.connect(PortRef(n1, "injection_port"), PortRef(oil1, "injection_port")) + network.connect(PortRef(n2, "injection_port"), PortRef(coal1, "injection_port")) + network.connect(PortRef(oil1, "co2_port"), PortRef(monQuotaCO2, "emission_port")) + network.connect(PortRef(coal1, "co2_port"), PortRef(monQuotaCO2, "emission_port")) + + database = DataBase() + database.add_data("Demand", "demand", ConstantData(100)) + 
database.add_data("Coal1", "pmin", ConstantData(0)) + database.add_data("Oil1", "pmin", ConstantData(0)) + database.add_data("Coal1", "pmax", ConstantData(100)) + database.add_data("Oil1", "pmax", ConstantData(100)) + database.add_data("Coal1", "emission_rate", ConstantData(2)) + database.add_data("Oil1", "emission_rate", ConstantData(1)) + database.add_data("Coal1", "cost", ConstantData(10)) + database.add_data("Oil1", "cost", ConstantData(100)) + database.add_data("L12", "f_max", ConstantData(100)) + database.add_data("QuotaCO2", "quota", ConstantData(150)) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + + output = OutputValues(problem) + oil1_p = output.component("Oil1").var("p").value + coal1_p = output.component("Coal1").var("p").value + l12_flow = output.component("L12").var("flow").value + + assert status == problem.solver.OPTIMAL + assert math.isclose(problem.solver.Objective().Value(), 5500) + assert math.isclose(oil1_p, 50) + assert math.isclose(coal1_p, 50) + assert math.isclose(l12_flow, -50) diff --git a/tests/models/test_short_term_storage_complex.py b/tests/models/test_short_term_storage_complex.py new file mode 100644 index 00000000..0d23c050 --- /dev/null +++ b/tests/models/test_short_term_storage_complex.py @@ -0,0 +1,204 @@ +import math + +import pandas as pd + +from andromede.libs.standard import ( + BALANCE_PORT_TYPE, + DEMAND_MODEL, + GENERATOR_MODEL, + GENERATOR_MODEL_WITH_PMIN, + LINK_MODEL, + NODE_BALANCE_MODEL, + SPILLAGE_MODEL, + THERMAL_CLUSTER_MODEL_HD, + UNSUPPLIED_ENERGY_MODEL, +) +from andromede.libs.standard_sc import SHORT_TERM_STORAGE_COMPLEX +from andromede.simulation import BlockBorderManagement, TimeBlock, build_problem +from andromede.study import ( + ConstantData, + DataBase, + Network, + Node, + PortRef, + TimeScenarioIndex, + TimeScenarioSeriesData, + create_component, +) + + +def generate_data( + efficiency: float, horizon: int, scenarios: int 
+) -> TimeScenarioSeriesData: + # Create an empty DataFrame with index being the range of the horizon + data = pd.DataFrame(index=range(horizon)) + + for scenario in range(scenarios): + # Create a column name based on the scenario number + column_name = f"scenario_{scenario}" + data[column_name] = 0 # Initialize the column with zeros + + for absolute_timestep in range(horizon): + if absolute_timestep == 0: + data.at[absolute_timestep, column_name] = -18 + else: + data.at[absolute_timestep, column_name] = 2 * efficiency + + # Return as TimeScenarioSeriesData object + return TimeScenarioSeriesData(time_scenario_series=data) + + +def short_term_storage_base(efficiency: float, horizon: int, result: int) -> None: + # 18 produced in the 1st time-step, then consumed 2 * efficiency in the rest + time_blocks = [TimeBlock(0, list(range(horizon)))] + scenarios = 1 + database = DataBase() + + database.add_data("D", "demand", generate_data(efficiency, horizon, scenarios)) + + database.add_data("U", "cost", ConstantData(10)) + database.add_data("S", "cost", ConstantData(1)) + + database.add_data("STS1", "p_max_injection", ConstantData(100)) + database.add_data("STS1", "p_max_withdrawal", ConstantData(50)) + database.add_data("STS1", "level_min", ConstantData(0)) + database.add_data("STS1", "level_max", ConstantData(1000)) + database.add_data("STS1", "inflows", ConstantData(0)) + database.add_data("STS1", "efficiency", ConstantData(efficiency)) + database.add_data("STS1", "withdrawal_penality", ConstantData(5)) + database.add_data("STS1", "level_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad+i_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad-i_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad+s_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad-s_penality", ConstantData(0)) + + node = Node(model=NODE_BALANCE_MODEL, id="1") + spillage = create_component(model=SPILLAGE_MODEL, id="S") + + unsupplied = 
create_component(model=UNSUPPLIED_ENERGY_MODEL, id="U") + + demand = create_component(model=DEMAND_MODEL, id="D") + + short_term_storage = create_component( + model=SHORT_TERM_STORAGE_COMPLEX, + id="STS1", + ) + + network = Network("test") + network.add_node(node) + for component in [demand, short_term_storage, spillage, unsupplied]: + network.add_component(component) + network.connect(PortRef(demand, "balance_port"), PortRef(node, "balance_port")) + network.connect( + PortRef(short_term_storage, "balance_port"), PortRef(node, "balance_port") + ) + network.connect(PortRef(spillage, "balance_port"), PortRef(node, "balance_port")) + network.connect(PortRef(unsupplied, "balance_port"), PortRef(node, "balance_port")) + + border_management = BlockBorderManagement.CYCLE + problem = build_problem( + network, + database, + time_blocks[0], + scenarios, + border_management=BlockBorderManagement.CYCLE, + ) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + + assert math.isclose(problem.solver.Objective().Value(), result) + + count_variables = 0 + for variable in problem.solver.variables(): + if "injection" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 100 + print(variable.name()) + print(variable.solution_value()) + elif "withdrawal" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 50 + print(variable.name()) + print(variable.solution_value()) + elif "level" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 1000 + print(variable.name()) + print(variable.solution_value()) + + assert count_variables == 3 * horizon + + database.add_data("STS1", "withdrawal_penality", ConstantData(0)) + database.add_data("STS1", "level_penality", ConstantData(5)) + database.add_data("STS1", "Pgrad+i_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad-i_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad+s_penality", ConstantData(0)) + 
database.add_data("STS1", "Pgrad-s_penality", ConstantData(0)) + + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + + assert math.isclose(problem.solver.Objective().Value(), result) + + count_variables = 0 + for variable in problem.solver.variables(): + if "injection" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 100 + print(variable.name()) + print(variable.solution_value()) + elif "withdrawal" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 50 + print(variable.name()) + print(variable.solution_value()) + elif "level" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 1000 + print(variable.name()) + print(variable.solution_value()) + + assert count_variables == 3 * horizon + + database.add_data("STS1", "withdrawal_penality", ConstantData(0)) + database.add_data("STS1", "level_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad+i_penality", ConstantData(5)) + database.add_data("STS1", "Pgrad-i_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad+s_penality", ConstantData(0)) + database.add_data("STS1", "Pgrad-s_penality", ConstantData(0)) + + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + + assert math.isclose(problem.solver.Objective().Value(), result) + + count_variables = 0 + for variable in problem.solver.variables(): + if "injection" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 100 + print(variable.name()) + print(variable.solution_value()) + elif "withdrawal" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 50 + print(variable.name()) + print(variable.solution_value()) + elif "level" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 1000 + print(variable.name()) + print(variable.solution_value()) + + assert count_variables == 3 * horizon + + +def 
test_short_test_horizon_10() -> None: + short_term_storage_base(efficiency=0.8, horizon=10, result=72) + + +def test_short_test_horizon_5() -> None: + short_term_storage_base(efficiency=0.2, horizon=5, result=18) diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py new file mode 100644 index 00000000..fe667438 --- /dev/null +++ b/tests/unittests/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +from pathlib import Path + +import pytest + + +@pytest.fixture(scope="session") +def data_dir() -> Path: + return Path(__file__).parent / "data" diff --git a/tests/unittests/data/components.yml b/tests/unittests/data/components.yml new file mode 100644 index 00000000..b5ae7570 --- /dev/null +++ b/tests/unittests/data/components.yml @@ -0,0 +1,46 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+study: + nodes: + - id: N + model: node + + components: + - id: G + model: generator + parameters: + - name: cost + type: constant + value: 30 + - name: p_max + type: constant + value: 100 + - id: D + model: demand + parameters: + - name: demand + type: constant + value: 100 + + connections: + - component1: N + port_1: injection_port + component2: D + port_2: injection_port + + - component1: N + port_1: injection_port + component2: G + port_2: injection_port + + + diff --git a/tests/unittests/data/components_for_short_term_storage.yml b/tests/unittests/data/components_for_short_term_storage.yml new file mode 100644 index 00000000..d63f12a3 --- /dev/null +++ b/tests/unittests/data/components_for_short_term_storage.yml @@ -0,0 +1,83 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+study: + nodes: + - id: N + model: node + + components: + - id: D + model: demand + parameters: + - name: demand + type: timeseries + timeseries: demand-ts + - id: S + model: spillage + parameters: + - name: cost + type: constant + value: 1 + - id: U + model: unsupplied + parameters: + - name: cost + type: constant + value: 10 + - id: STS1 + model: short-term-storage + parameters: + - name: p_max_injection + type: constant + value: 100 + - name: p_max_withdrawal + type: constant + value: 50 + - name: level_min + type: constant + value: 0 + - name: level_max + type: constant + value: 1000 + - name: inflows + type: constant + value: 0 + - name: efficiency + type: constant + value: 0.8 + + + connections: + - component1: N + port_1: injection_port + component2: D + port_2: injection_port + + - component1: N + port_1: injection_port + component2: STS1 + port_2: injection_port + + - component1: N + port_1: injection_port + component2: U + port_2: injection_port + + - component1: N + port_1: injection_port + component2: S + port_2: injection_port + + + + + diff --git a/tests/unittests/data/demand-ts.txt b/tests/unittests/data/demand-ts.txt new file mode 100644 index 00000000..96bc6ccd --- /dev/null +++ b/tests/unittests/data/demand-ts.txt @@ -0,0 +1,10 @@ +-18.0 +1.6 +1.6 +1.6 +1.6 +1.6 +1.6 +1.6 +1.6 +1.6 \ No newline at end of file diff --git a/tests/unittests/data/gen-costs.txt b/tests/unittests/data/gen-costs.txt new file mode 100644 index 00000000..d1edf6de --- /dev/null +++ b/tests/unittests/data/gen-costs.txt @@ -0,0 +1,2 @@ +100 200 +50 100 \ No newline at end of file diff --git a/tests/unittests/data/lib.yml b/tests/unittests/data/lib.yml new file mode 100644 index 00000000..f42df312 --- /dev/null +++ b/tests/unittests/data/lib.yml @@ -0,0 +1,183 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +library: + id: basic + description: Basic library + + port-types: + - id: flow + description: A port which transfers power flow + fields: + - name: flow + + models: + + - id: generator + description: A basic generator model + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + - name: p_max + time-dependent: false + scenario-dependent: false + variables: + - name: generation + lower-bound: 0 + upper-bound: p_max + ports: + - name: injection_port + type: flow + port-field-definitions: + - port: injection_port + field: flow + definition: generation + objective: expec(sum(cost * generation)) + + - id: node + description: A basic balancing node model + ports: + - name: injection_port + type: flow + binding-constraints: + - name: balance + expression: sum_connections(injection_port.flow) = 0 + - id: spillage + description: A basic spillage model + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + variables: + - name: spillage + lower-bound: 0 + ports: + - name: injection_port + type: flow + port-field-definitions: + - port: injection_port + field: flow + definition: -spillage + - id: unsupplied + description: A basic unsupplied model + parameters: + - name: cost + time-dependent: false + scenario-dependent: false + variables: + - name: unsupplied_energy + lower-bound: 0 + ports: + - name: injection_port + type: flow + port-field-definitions: + - port: injection_port + field: flow + definition: unsupplied_energy + + + - id: demand + description: A basic fixed demand model + parameters: + - name: demand + time-dependent: true + scenario-dependent: true + ports: + - name: injection_port + type: flow + port-field-definitions: + - port: injection_port + field: flow + definition: -demand + + - id: short-term-storage + 
description: A short term storage + parameters: + - name: efficiency + - name: level_min + - name: level_max + - name: p_max_withdrawal + - name: p_max_injection + - name: inflows + variables: + - name: injection + lower-bound: 0 + upper-bound: p_max_injection + - name: withdrawal + lower-bound: 0 + upper-bound: p_max_withdrawal + - name: level + lower-bound: level_min + upper-bound: level_max + ports: + - name: injection_port + type: flow + port-field-definitions: + - port: injection_port + field: flow + definition: injection - withdrawal + constraints: + - name: Level equation + expression: level[t] - level[t-1] - efficiency * injection + withdrawal = inflows + + - id: thermal-cluster-dhd + description: DHD model for thermal cluster + parameters: + - name: cost + - name: p_min + - name: p_max + - name: d_min_up + - name: d_min_down + - name: nb_units_max + - name: nb_failures + time-dependent: true + scenario-dependent: true + variables: + - name: generation + lower-bound: 0 + upper-bound: nb_units_max * p_max + time-dependent: true + scenario-dependent: true + - name: nb_on + lower-bound: 0 + upper-bound: nb_units_max + time-dependent: true + scenario-dependent: false + - name: nb_stop + lower-bound: 0 + upper-bound: nb_units_max + time-dependent: true + scenario-dependent: false + - name: nb_start + lower-bound: 0 + upper-bound: nb_units_max + time-dependent: true + scenario-dependent: false + ports: + - name: injection_port + type: flow + port-field-definitions: + - port: injection_port + field: flow + definition: generation + constraints: + - name: Max generation + expression: generation <= nb_on * p_max + - name: Min generation + expression: generation >= nb_on * p_min + - name: Number of units variation + expression: nb_on = nb_on[-1] + nb_start - nb_stop + - name: Min up time + expression: sum(nb_start[-d_min_up + 1 .. 0]) <= nb_on + - name: Min down time + expression: sum(nb_stop[-d_min_down + 1 .. 
0]) <= nb_units_max[-d_min_down] - nb_on + objective: expec(sum(cost * generation)) \ No newline at end of file diff --git a/tests/unittests/data/model_port_definition_ko.yml b/tests/unittests/data/model_port_definition_ko.yml new file mode 100644 index 00000000..73c310cc --- /dev/null +++ b/tests/unittests/data/model_port_definition_ko.yml @@ -0,0 +1,30 @@ +library: + id: basic + description: Basic library + + port-types: + - id: 1er_flow + description: first flow + fields: + - name: 1er_flow + models: + + - id: short-term-storage + description: A short term storage + parameters: + - name: p_max_withdrawal + - name: p_max_injection + variables: + - name: injection + lower-bound: 0 + upper-bound: p_max_injection + - name: withdrawal + lower-bound: 0 + upper-bound: p_max_withdrawal + ports: + - name: injection_port_1 + type: 1er_flow + constraints: + - name: Level equation + expression: injection_port_1.1er_flow = withdrawal + diff --git a/tests/unittests/data/model_port_definition_ok.yml b/tests/unittests/data/model_port_definition_ok.yml new file mode 100644 index 00000000..57b6512f --- /dev/null +++ b/tests/unittests/data/model_port_definition_ok.yml @@ -0,0 +1,30 @@ +library: + id: basic + description: Basic library + + port-types: + - id: flow + description: second flow + fields: + - name: flow + models: + + - id: short-term-storage-2 + description: A short term storage + parameters: + - name: p_max_withdrawal + - name: p_max_injection + variables: + - name: injection + lower-bound: 0 + upper-bound: p_max_injection + - name: withdrawal + lower-bound: 0 + upper-bound: p_max_withdrawal + ports: + - name: injection_port + type: flow + constraints: + - name: Level equation + expression: injection_port.flow = withdrawal + diff --git a/tests/unittests/expressions/parsing/__init__.py b/tests/unittests/expressions/parsing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/expressions/parsing/test_expression_parsing.py 
b/tests/unittests/expressions/parsing/test_expression_parsing.py new file mode 100644 index 00000000..13fb04a3 --- /dev/null +++ b/tests/unittests/expressions/parsing/test_expression_parsing.py @@ -0,0 +1,184 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. +from typing import Set + +import pytest + +from andromede.expression import ExpressionNode, literal, param, print_expr, var +from andromede.expression.equality import expressions_equal +from andromede.expression.expression import ExpressionRange, port_field +from andromede.expression.parsing.parse_expression import ( + AntaresParseException, + ModelIdentifiers, + parse_expression, +) + + +@pytest.mark.parametrize( + "variables, parameters, expression_str, expected", + [ + ({}, {}, "1 + 2", literal(1) + 2), + ({}, {}, "1 - 2", literal(1) - 2), + ({}, {}, "1 - 3 + 4 - 2", literal(1) - 3 + 4 - 2), + ( + {"x"}, + {"p"}, + "1 + 2 * x = p", + literal(1) + 2 * var("x") == param("p"), + ), + ( + {}, + {}, + "port.f <= 0", + port_field("port", "f") <= 0, + ), + ({"x"}, {}, "sum(x)", var("x").sum()), + ({"x"}, {}, "x[-1]", var("x").eval(-literal(1))), + ( + {"x"}, + {}, + "x[-1..5]", + var("x").eval(ExpressionRange(-literal(1), literal(5))), + ), + ({"x"}, {}, "x[1]", var("x").eval(1)), + ({"x"}, {}, "x[t-1]", var("x").shift(-literal(1))), + ( + {"x"}, + {}, + "x[t-1, t+4]", + var("x").shift([-literal(1), literal(4)]), + ), + ( + {"x"}, + {}, + "x[t-1+1]", + var("x").shift(-literal(1) + literal(1)), + ), + ( + {"x"}, + {"d"}, + "x[t-d+1]", + var("x").shift(-param("d") + literal(1)), + ), + ( + {"x"}, + {"d"}, + "x[t-2*d+1]", + var("x").shift(-literal(2) * param("d") + literal(1)), + ), + ( + {"x"}, + 
{"d"}, + "x[t-1+d*2]", + var("x").shift(-literal(1) + param("d") * literal(2)), + ), + ( + {"x"}, + {"d"}, + "x[t-2-d+1]", + var("x").shift(-literal(2) - param("d") + literal(1)), + ), + ( + {"x"}, + {}, + "x[t-1, t, t+4]", + var("x").shift([-literal(1), literal(0), literal(4)]), + ), + ( + {"x"}, + {}, + "x[t-1..t+5]", + var("x").shift(ExpressionRange(-literal(1), literal(5))), + ), + ( + {"x"}, + {}, + "x[t-1..t]", + var("x").shift(ExpressionRange(-literal(1), literal(0))), + ), + ( + {"x"}, + {}, + "x[t..t+5]", + var("x").shift(ExpressionRange(literal(0), literal(5))), + ), + ({"x"}, {}, "x[t]", var("x")), + ({"x"}, {"p"}, "x[t+p]", var("x").shift(param("p"))), + ( + {"x"}, + {}, + "sum(x[-1..5])", + var("x").eval(ExpressionRange(-literal(1), literal(5))).sum(), + ), + ({}, {}, "sum_connections(port.f)", port_field("port", "f").sum_connections()), + ( + {"level", "injection", "withdrawal"}, + {"inflows", "efficiency"}, + "level - level[-1] - efficiency * injection + withdrawal = inflows", + var("level") + - var("level").eval(-literal(1)) + - param("efficiency") * var("injection") + + var("withdrawal") + == param("inflows"), + ), + ( + {"nb_start", "nb_on"}, + {"d_min_up"}, + "sum(nb_start[-d_min_up + 1 .. 
0]) <= nb_on", + var("nb_start") + .eval(ExpressionRange(-param("d_min_up") + 1, literal(0))) + .sum() + <= var("nb_on"), + ), + ( + {"generation"}, + {"cost"}, + "expec(sum(cost * generation))", + (param("cost") * var("generation")).sum().expec(), + ), + ], +) +def test_parsing_visitor( + variables: Set[str], + parameters: Set[str], + expression_str: str, + expected: ExpressionNode, +): + identifiers = ModelIdentifiers(variables, parameters) + expr = parse_expression(expression_str, identifiers) + print() + print(print_expr(expr)) + assert expressions_equal(expr, expected) + + +@pytest.mark.parametrize( + "expression_str", + [ + "1**3", + "1 6", + "x[t+1-t]", + "x[2*t]", + "x[t 4]", + ], +) +def test_parse_cancellation_should_throw(expression_str: str): + # Console log error is displayed ! + identifiers = ModelIdentifiers( + variables={"x"}, + parameters=set(), + ) + + with pytest.raises( + AntaresParseException, + match=r"An error occurred during parsing: ParseCancellationException", + ): + parse_expression(expression_str, identifiers) diff --git a/tests/unittests/model/__init__.py b/tests/unittests/model/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/model/test_model_parsing.py b/tests/unittests/model/test_model_parsing.py new file mode 100644 index 00000000..a9234839 --- /dev/null +++ b/tests/unittests/model/test_model_parsing.py @@ -0,0 +1,169 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# +# See AUTHORS.txt +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# SPDX-License-Identifier: MPL-2.0 +# +# This file is part of the Antares project. 
+import io +from pathlib import Path + +import pytest + +from andromede.expression import literal, param, var +from andromede.expression.expression import port_field +from andromede.expression.parsing.parse_expression import AntaresParseException +from andromede.libs.standard import CONSTANT +from andromede.model import ( + Constraint, + ModelPort, + PortField, + PortType, + float_parameter, + float_variable, + model, +) +from andromede.model.model import PortFieldDefinition, PortFieldId +from andromede.model.parsing import parse_yaml_library +from andromede.model.resolve_library import resolve_library + + +def test_library_parsing(data_dir: Path): + lib_file = data_dir / "lib.yml" + + with lib_file.open() as f: + input_lib = parse_yaml_library(f) + assert input_lib.id == "basic" + assert len(input_lib.models) == 7 + assert len(input_lib.port_types) == 1 + + lib = resolve_library(input_lib) + assert len(lib.models) == 7 + assert len(lib.port_types) == 1 + port_type = lib.port_types["flow"] + assert port_type == PortType(id="flow", fields=[PortField(name="flow")]) + gen_model = lib.models["generator"] + assert gen_model == model( + id="generator", + parameters=[ + float_parameter("cost", structure=CONSTANT), + float_parameter("p_max", structure=CONSTANT), + ], + variables=[ + float_variable( + "generation", lower_bound=literal(0), upper_bound=param("p_max") + ) + ], + ports=[ModelPort(port_type=port_type, port_name="injection_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId(port_name="injection_port", field_name="flow"), + definition=var("generation"), + ) + ], + objective_operational_contribution=(param("cost") * var("generation")) + .sum() + .expec(), + ) + short_term_storage = lib.models["short-term-storage"] + assert short_term_storage == model( + id="short-term-storage", + parameters=[ + float_parameter("efficiency", structure=CONSTANT), + float_parameter("level_min", structure=CONSTANT), + float_parameter("level_max", 
structure=CONSTANT), + float_parameter("p_max_withdrawal", structure=CONSTANT), + float_parameter("p_max_injection", structure=CONSTANT), + float_parameter("inflows", structure=CONSTANT), + ], + variables=[ + float_variable( + "injection", + lower_bound=literal(0), + upper_bound=param("p_max_injection"), + ), + float_variable( + "withdrawal", + lower_bound=literal(0), + upper_bound=param("p_max_withdrawal"), + ), + float_variable( + "level", + lower_bound=param("level_min"), + upper_bound=param("level_max"), + ), + ], + ports=[ModelPort(port_type=port_type, port_name="injection_port")], + port_fields_definitions=[ + PortFieldDefinition( + port_field=PortFieldId(port_name="injection_port", field_name="flow"), + definition=var("injection") - var("withdrawal"), + ) + ], + constraints=[ + Constraint( + name="Level equation", + expression=var("level") + - var("level").shift(-literal(1)) + - param("efficiency") * var("injection") + + var("withdrawal") + == param("inflows"), + ) + ], + ) + + +def test_library_error_parsing(data_dir: Path): + lib_file = data_dir / "model_port_definition_ko.yml" + + with lib_file.open() as f: + input_lib = parse_yaml_library(f) + assert input_lib.id == "basic" + with pytest.raises( + AntaresParseException, + match=r"An error occurred during parsing: ParseCancellationException", + ): + resolve_library(input_lib) + + +def test_library_port_model_ok_parsing(data_dir: Path): + lib_file = data_dir / "model_port_definition_ok.yml" + + with lib_file.open() as f: + input_lib = parse_yaml_library(f) + assert input_lib.id == "basic" + + lib = resolve_library(input_lib) + port_type = lib.port_types["flow"] + assert port_type == PortType(id="flow", fields=[PortField(name="flow")]) + short_term_storage = lib.models["short-term-storage-2"] + assert short_term_storage == model( + id="short-term-storage-2", + parameters=[ + float_parameter("p_max_withdrawal", structure=CONSTANT), + float_parameter("p_max_injection", structure=CONSTANT), + ], + variables=[ 
+ float_variable( + "injection", + lower_bound=literal(0), + upper_bound=param("p_max_injection"), + ), + float_variable( + "withdrawal", + lower_bound=literal(0), + upper_bound=param("p_max_withdrawal"), + ), + ], + ports=[ModelPort(port_type=port_type, port_name="injection_port")], + constraints=[ + Constraint( + name="Level equation", + expression=port_field("injection_port", "flow") == var("withdrawal"), + ) + ], + ) diff --git a/tests/unittests/study/__init__.py b/tests/unittests/study/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/study/test_components_parsing.py b/tests/unittests/study/test_components_parsing.py new file mode 100644 index 00000000..ddae9cbd --- /dev/null +++ b/tests/unittests/study/test_components_parsing.py @@ -0,0 +1,143 @@ +from pathlib import Path + +import pandas as pd +import pytest + +from andromede.model.parsing import InputLibrary, parse_yaml_library +from andromede.model.resolve_library import resolve_library +from andromede.simulation import BlockBorderManagement, TimeBlock, build_problem +from andromede.study import TimeScenarioIndex, TimeScenarioSeriesData +from andromede.study.parsing import InputComponents, parse_yaml_components +from andromede.study.resolve_components import ( + build_data_base, + build_network, + consistency_check, + resolve_components_and_cnx, +) + + +@pytest.fixture +def input_component( + data_dir: Path, +) -> InputComponents: + compo_file = data_dir / "components.yml" + + with compo_file.open() as c: + return parse_yaml_components(c) + + +@pytest.fixture +def input_library( + data_dir: Path, +) -> InputLibrary: + library = data_dir / "lib.yml" + + with library.open() as lib: + return parse_yaml_library(lib) + + +def test_parsing_components_ok(input_component, input_library): + assert len(input_component.components) == 2 + assert len(input_component.nodes) == 1 + assert len(input_component.connections) == 2 + lib = resolve_library(input_library) + result = 
resolve_components_and_cnx(input_component, lib) + + assert len(result.components) == 2 + assert len(result.nodes) == 1 + assert len(result.connections) == 2 + + +def test_consistency_check_ok(input_component, input_library): + result_lib = resolve_library(input_library) + result_comp = resolve_components_and_cnx(input_component, result_lib) + consistency_check(result_comp.components, result_lib.models) + + +def test_consistency_check_ko(input_component, input_library): + result_lib = resolve_library(input_library) + result_comp = resolve_components_and_cnx(input_component, result_lib) + result_lib.models.pop("generator") + with pytest.raises( + ValueError, + match=r"Error: Component G has invalid model ID: generator", + ): + consistency_check(result_comp.components, result_lib.models) + + +def test_basic_balance_using_yaml(input_component, input_library) -> None: + result_lib = resolve_library(input_library) + components_input = resolve_components_and_cnx(input_component, result_lib) + consistency_check(components_input.components, result_lib.models) + + database = build_data_base(input_component, None) + network = build_network(components_input) + + scenarios = 1 + problem = build_problem(network, database, TimeBlock(1, [0]), scenarios) + status = problem.solver.Solve() + assert status == problem.solver.OPTIMAL + assert problem.solver.Objective().Value() == 3000 + + +def generate_data_for_short_term_storage_test(scenarios: int) -> TimeScenarioSeriesData: + data = {} + horizon = 10 + efficiency = 0.8 + for scenario in range(scenarios): + for absolute_timestep in range(10): + if absolute_timestep == 0: + data[TimeScenarioIndex(absolute_timestep, scenario)] = -18 + else: + data[TimeScenarioIndex(absolute_timestep, scenario)] = 2 * efficiency + + values = [value for value in data.values()] + data_df = pd.DataFrame(values, columns=["Value"]) + return TimeScenarioSeriesData(data_df) + + +def test_short_term_storage_base_with_yaml(data_dir: Path) -> None: + compo_file = 
data_dir / "components_for_short_term_storage.yml" + lib_file = data_dir / "lib.yml" + with lib_file.open() as lib: + input_library = parse_yaml_library(lib) + + with compo_file.open() as c: + components_file = parse_yaml_components(c) + library = resolve_library(input_library) + components_input = resolve_components_and_cnx(components_file, library) + # 18 produced in the 1st time-step, then consumed 2 * efficiency in the rest + scenarios = 1 + horizon = 10 + time_blocks = [TimeBlock(0, list(range(horizon)))] + + database = build_data_base(components_file, data_dir) + network = build_network(components_input) + + problem = build_problem( + network, + database, + time_blocks[0], + scenarios, + border_management=BlockBorderManagement.CYCLE, + ) + status = problem.solver.Solve() + + assert status == problem.solver.OPTIMAL + + # The short-term storage should satisfy the load + # No spillage / unsupplied energy is expected + assert problem.solver.Objective().Value() == 0 + + count_variables = 0 + for variable in problem.solver.variables(): + if "injection" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 100 + elif "withdrawal" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 50 + elif "level" in variable.name(): + count_variables += 1 + assert 0 <= variable.solution_value() <= 1000 + assert count_variables == 3 * horizon diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index bd243c34..02f9d979 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -9,13 +9,13 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. 
- +from pathlib import Path from typing import Union +import pandas as pd import pytest from andromede.expression import param, var -from andromede.expression.expression import port_field from andromede.expression.indexing_structure import IndexingStructure from andromede.libs.standard import ( BALANCE_PORT_TYPE, @@ -43,11 +43,11 @@ ScenarioIndex, ScenarioSeriesData, TimeIndex, - TimeScenarioIndex, TimeScenarioSeriesData, TimeSeriesData, create_component, ) +from andromede.study.data import load_ts_from_txt @pytest.fixture @@ -123,7 +123,23 @@ def mock_generator_with_scenario_varying_fixed_time_param() -> Model: return scenario_varying_fixed_time_generator -def test_requirements_consistency_demand_model_fix_ok(mock_network: Network) -> None: +@pytest.fixture +def demand_data() -> TimeScenarioSeriesData: + demand_data = pd.DataFrame( + [ + [100], + [50], + ], + index=[0, 1], + columns=[0], + ) + + return TimeScenarioSeriesData(demand_data) + + +def test_requirements_consistency_demand_model_fix_ok( + mock_network: Network, demand_data: TimeScenarioSeriesData +) -> None: # Given # database data for "demand" defined as Time varying # and model "D" DEMAND_MODEL is TIME_AND_SCENARIO_FREE @@ -131,9 +147,6 @@ def test_requirements_consistency_demand_model_fix_ok(mock_network: Network) -> database.add_data("G", "p_max", ConstantData(100)) database.add_data("G", "cost", ConstantData(30)) - demand_data = TimeScenarioSeriesData( - {TimeScenarioIndex(0, 0): 100, TimeScenarioIndex(1, 0): 50} - ) database.add_data("D", "demand", demand_data) # When @@ -155,7 +168,7 @@ def test_requirements_consistency_generator_model_ok(mock_network: Network) -> N def test_consistency_generation_time_free_for_constant_model_raises_exception( - mock_network: Network, + mock_network: Network, demand_data: TimeScenarioSeriesData ) -> None: # Given # database data for "p_max" defined as time varying @@ -165,9 +178,6 @@ def test_consistency_generation_time_free_for_constant_model_raises_exception( 
database.add_data("G", "cost", ConstantData(30)) - demand_data = TimeScenarioSeriesData( - {TimeScenarioIndex(0, 0): 100, TimeScenarioIndex(1, 0): 50} - ) database.add_data("D", "demand", demand_data) database.add_data("G", "p_max", demand_data) @@ -177,7 +187,7 @@ def test_consistency_generation_time_free_for_constant_model_raises_exception( def test_requirements_consistency_demand_model_time_varying_ok( - mock_network: Network, + mock_network: Network, demand_data: TimeScenarioSeriesData ) -> None: # Given # database data for "demand" defined as constant @@ -185,10 +195,6 @@ def test_requirements_consistency_demand_model_time_varying_ok( database = DataBase() database.add_data("G", "p_max", ConstantData(100)) database.add_data("G", "cost", ConstantData(30)) - - demand_data = TimeScenarioSeriesData( - {TimeScenarioIndex(0, 0): 100, TimeScenarioIndex(1, 0): 50} - ) database.add_data("D", "demand", demand_data) # When @@ -227,12 +233,14 @@ def test_requirements_consistency_time_varying_parameter_with_correct_data_passe (ScenarioSeriesData({ScenarioIndex(0): 100, ScenarioIndex(1): 50})), ( TimeScenarioSeriesData( - { - TimeScenarioIndex(0, 0): 100, - TimeScenarioIndex(0, 1): 50, - TimeScenarioIndex(1, 0): 500, - TimeScenarioIndex(1, 1): 540, - } + pd.DataFrame( + [ + [100, 500], + [500, 540], + ], + index=[0, 1], + columns=[0, 1], + ) ) ), ], @@ -270,12 +278,7 @@ def test_requirements_consistency_time_varying_parameter_with_scenario_varying_d (TimeSeriesData({TimeIndex(0): 100, TimeIndex(1): 50})), ( TimeScenarioSeriesData( - { - TimeScenarioIndex(0, 0): 100, - TimeScenarioIndex(0, 1): 50, - TimeScenarioIndex(1, 0): 500, - TimeScenarioIndex(1, 1): 540, - } + pd.DataFrame({(0, 0): [100, 500], (0, 1): [50, 540]}, index=[0, 1]) ) ), ], @@ -328,3 +331,13 @@ def test_requirements_consistency_scenario_varying_parameter_with_correct_data_p # No ValueError should be raised database.requirements_consistency(network) + + +def test_load_data_from_txt(data_dir: Path): + txt_file 
= "gen-costs" + + gen_costs = load_ts_from_txt(txt_file, data_dir) + expected_timeseries = pd.DataFrame( + [[100, 200], [50, 100]], index=[0, 1], columns=[0, 1] + ) + assert gen_costs.equals(expected_timeseries) diff --git a/tests/unittests/test_model.py b/tests/unittests/test_model.py index b5efe767..7d567683 100644 --- a/tests/unittests/test_model.py +++ b/tests/unittests/test_model.py @@ -22,7 +22,7 @@ port_field, var, ) -from andromede.model import Constraint, float_variable, model +from andromede.model import Constraint, float_parameter, float_variable, model from andromede.model.model import PortFieldDefinition, port_field_def @@ -206,3 +206,13 @@ def test_instantiating_a_model_with_non_linear_scenario_operator_in_the_objectiv def test_invalid_port_field_definition_should_raise(expression: ExpressionNode) -> None: with pytest.raises(ValueError) as exc: port_field_def(port_name="p", field_name="f", definition=expression) + + +def test_constraint_equals(): + # checks in particular that expressions are correctly compared + assert Constraint(name="c", expression=var("x") <= param("p")) == Constraint( + name="c", expression=var("x") <= param("p") + ) + assert Constraint(name="c", expression=var("x") <= param("p")) != Constraint( + name="c", expression=var("y") <= param("p") + ) diff --git a/tests/unittests/test_utils.py b/tests/unittests/test_utils.py index 306fd821..f88477eb 100644 --- a/tests/unittests/test_utils.py +++ b/tests/unittests/test_utils.py @@ -10,9 +10,17 @@ # # This file is part of the Antares project. 
+from typing import List + +import pandas as pd import pytest -from andromede.study import TimeScenarioIndex, TimeScenarioSeriesData +from andromede.study import ( + TimeIndex, + TimeScenarioIndex, + TimeScenarioSeriesData, + TimeSeriesData, +) from andromede.utils import get_or_add @@ -32,9 +40,11 @@ def value_factory() -> str: assert get_or_add(d, "key2", value_factory) == "value2" -def generate_data(value: float, horizon: int, scenarios: int) -> TimeScenarioSeriesData: - data = {} - for absolute_timestep in range(horizon): - for scenario in range(scenarios): - data[TimeScenarioIndex(absolute_timestep, scenario)] = value +def generate_scalar_matrix_data( + value: float, horizon: int, scenarios: int +) -> TimeScenarioSeriesData: + data = pd.DataFrame(index=range(horizon), columns=range(scenarios)) + + data.fillna(value, inplace=True) + return TimeScenarioSeriesData(time_scenario_series=data) From 52705ff3675d8ee067cfd8ab2f9736638a250ff2 Mon Sep 17 00:00:00 2001 From: Ian Menezes Date: Wed, 24 Jul 2024 16:00:08 +0200 Subject: [PATCH 11/12] Fixed var name tree prefix --- src/andromede/simulation/optimization.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/andromede/simulation/optimization.py b/src/andromede/simulation/optimization.py index c23ba378..a46fcc01 100644 --- a/src/andromede/simulation/optimization.py +++ b/src/andromede/simulation/optimization.py @@ -743,6 +743,9 @@ def _create_variables(self) -> None: var_name: str = f"{model_var.name}" component_prefix = f"{component.id}_" if component.id else "" + tree_prefix = ( + f"{self.context.tree_node}_" if self.context.tree_node else "" + ) for block_timestep in self.context.get_time_indices(var_indexing): block_suffix = ( @@ -776,7 +779,7 @@ def _create_variables(self) -> None: # Internally, it will be indexed by a structure that into account # the component id, variable name, timestep and scenario separately solver_var = None - solver_var_name = 
f"{component_prefix}{var_name}{block_suffix}{scenario_suffix}" + solver_var_name = f"{tree_prefix}{component_prefix}{var_name}{block_suffix}{scenario_suffix}" if math.isclose(lower_bound, upper_bound): raise ValueError( From fbe54ff17d1b1e038dc93701df54c577423dcbcc Mon Sep 17 00:00:00 2001 From: Ian Menezes Date: Thu, 18 Jul 2024 17:10:26 +0200 Subject: [PATCH 12/12] Cherry-picked benders_decomposed debug mode --- src/andromede/simulation/benders_decomposed.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/andromede/simulation/benders_decomposed.py b/src/andromede/simulation/benders_decomposed.py index 57a03ff4..5d62cc5e 100644 --- a/src/andromede/simulation/benders_decomposed.py +++ b/src/andromede/simulation/benders_decomposed.py @@ -197,8 +197,11 @@ def run( solver_name: str = "XPRESS", log_level: int = 0, should_merge: bool = False, + show_debug: bool = False, ) -> bool: - self.initialise(solver_name=solver_name, log_level=log_level) + self.initialise( + solver_name=solver_name, log_level=log_level, is_debug=show_debug + ) if not should_merge: return_code = BendersRunner(self.emplacement).run() @@ -251,7 +254,7 @@ def build_benders_decomposed_problem( subproblems = [] # Benders Decomposed Sub-problems for tree_node in decision_tree_root.traverse(): - suffix = f"_{tree_node.id}" if decision_tree_root.size > 1 else "" + suffix_tree = f"_{tree_node.id}" if decision_tree_root.size > 1 else "" masters.append( build_problem( @@ -259,7 +262,7 @@ def build_benders_decomposed_problem( database, null_time_block, null_scenario, - problem_name=f"master{suffix}", + problem_name=f"master{suffix_tree}", solver_id=solver_id, build_strategy=InvestmentProblemStrategy(), decision_tree_node=tree_node.id, @@ -268,8 +271,7 @@ def build_benders_decomposed_problem( ) for block in tree_node.config.blocks: - if len(tree_node.config.blocks) > 1: - suffix += f"_t{block.id}" + suffix_block = f"_b{block.id}" if len(tree_node.config.blocks) > 1 else "" 
subproblems.append( build_problem( @@ -277,7 +279,7 @@ def build_benders_decomposed_problem( database, block, tree_node.config.scenarios, - problem_name=f"subproblem{suffix}", + problem_name=f"subproblem{suffix_tree}{suffix_block}", solver_id=solver_id, build_strategy=OperationalProblemStrategy(), decision_tree_node=tree_node.id,