From 6417c7a122954a08bd83af1fd639ea160510c864 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 12 Jan 2024 16:42:12 +0100 Subject: [PATCH 01/42] build_pop_weighted_energy: don't reduce district heat share Previously the DH share was being multiplied by the population weighting, reducing the DH share with multiple nodes. --- scripts/build_population_weighted_energy_totals.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 879e3b9b2..20467f72c 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -26,4 +26,9 @@ nodal_energy_totals.index = pop_layout.index nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) + # district heating share should not be divided by population fraction + dh_share = energy_totals["district heat share"].loc[pop_layout.ct].fillna(0.0) + dh_share.index = pop_layout.index + nodal_energy_totals["district heat share"] = dh_share + nodal_energy_totals.to_csv(snakemake.output[0]) From 45603385d22229a0c6060f70df1c05afbe71cd5f Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 16:47:19 +0100 Subject: [PATCH 02/42] move building of daily heat profile to its own script Previously this was handled inside prepare_sector_network.py. 
--- rules/build_sector.smk | 36 +++++++--- ...t_demand.py => build_daily_heat_demand.py} | 0 scripts/build_hourly_heat_demand.py | 69 +++++++++++++++++++ scripts/prepare_sector_network.py | 29 ++------ 4 files changed, 102 insertions(+), 32 deletions(-) rename scripts/{build_heat_demand.py => build_daily_heat_demand.py} (100%) create mode 100644 scripts/build_hourly_heat_demand.py diff --git a/rules/build_sector.smk b/rules/build_sector.smk index ab8ff4edf..98d3df2d6 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -139,7 +139,7 @@ if not (config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]): gas_infrastructure = {} -rule build_heat_demands: +rule build_daily_heat_demand: params: snapshots=config["snapshots"], input: @@ -147,18 +147,39 @@ rule build_heat_demands: regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + LOGS + "build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc", benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + BENCHMARKS + "build_daily_heat_demand/{scope}_s{simpl}_{clusters}" conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots=config["snapshots"], + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + output: + heat_demand=RESOURCES + "hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + resources: + mem_mb=2000, + threads: 8 + log: + LOGS + "build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc", + 
benchmark: + BENCHMARKS + "build_hourly_heat_demand/{scope}_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + "../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: @@ -742,7 +763,6 @@ rule prepare_sector_network: if config["foresight"] == "overnight" else RESOURCES + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{planning_horizons}.csv", @@ -755,9 +775,7 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", + hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 100% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py new file mode 100644 index 000000000..94ad72664 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. 
+""" + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from itertools import product + + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_heat_demands", + simpl="", + clusters=48, + ) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = daily_space_heat_demand * intraday_year_profile + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, + axis=1, + names = ["sector use", "node"]) + + heat_demand.index.name="snapshots" + + print(heat_demand) + + print(heat_demand.stack()) + + ds = heat_demand.stack().to_xarray()#xr.Dataset.from_dataframe(heat_demand) + + print(ds) + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index ea0c4f3fb..04c123efa 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -17,7 +17,7 @@ import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from _helpers import 
update_config_with_sector_opts from add_electricity import calculate_annuity, sanitize_carriers from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement @@ -1629,14 +1629,8 @@ def add_land_transport(n, costs): def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") - ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + heat_demand_shape = xr.open_dataset(snakemake.input.hourly_heat_demand_total).to_dataframe().unstack(level=1) sectors = ["residential", "services"] uses = ["water", "space"] @@ -1644,25 +1638,14 @@ def build_heat_demand(n): heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile + name = f"{sector} {use}" - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) From 
15488eda429ad9c0f952c150deb610015b10123e Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 17:51:08 +0100 Subject: [PATCH 03/42] build_energy_totals: output district heat share to separate file --- rules/build_sector.smk | 1 + scripts/build_energy_totals.py | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 98d3df2d6..a60dc4fe2 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -272,6 +272,7 @@ rule build_energy_totals: energy_name=RESOURCES + "energy_totals.csv", co2_name=RESOURCES + "co2_totals.csv", transport_name=RESOURCES + "transport_data.csv", + district_heat_share=RESOURCES + "district_heat_share.csv", threads: 16 resources: mem_mb=10000, diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 67b864663..f7da2a375 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -394,13 +394,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -575,16 +568,31 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(idees): + + # district heating share + district_heat = idees[ + ["derived heat residential", "derived heat services"] + ].sum(axis=1) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum(axis=1) + + district_heat_share = district_heat/total_heat + # Missing district heating share dh_share = pd.read_csv( 
snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -753,6 +761,9 @@ def build_transport_data(countries, population, idees): energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) + district_heat_share = build_district_heat_share(idees) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) From 0022a88517ce2352ace44d28ad1721880845ba2b Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 18:55:09 +0100 Subject: [PATCH 04/42] move calculation of district heating share to its own script Now the script build_district_heat_share.py does what the old function create_nodes_for_heating() in prepare_sector_networks.py did. There is no need to build nodes lists for each heating sector, since all nodes have district heating now. 
--- rules/build_sector.smk | 22 +++ scripts/build_district_heat_share.py | 77 +++++++++ scripts/build_energy_totals.py | 6 +- ...build_population_weighted_energy_totals.py | 5 - scripts/prepare_sector_network.py | 161 +++++++----------- 5 files changed, 164 insertions(+), 107 deletions(-) create mode 100644 scripts/build_district_heat_share.py diff --git a/rules/build_sector.smk b/rules/build_sector.smk index a60dc4fe2..e1753a2b0 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -726,6 +726,27 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" + + +rule build_district_heat_share: + params: + sector=config["sector"], + input: + district_heat_share=RESOURCES + "district_heat_share.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + output: + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log", + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + rule prepare_sector_network: params: co2_budget=config["co2_budget"], @@ -777,6 +798,7 @@ rule prepare_sector_network: industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 000000000..d521214da --- /dev/null +++ b/scripts/build_district_heat_share.py @@ 
-0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. +""" + +import pandas as pd + +from prepare_sector_network import get + +import logging + + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_heat_demands", + simpl="", + clusters=48, + ) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, + index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, + index_col=0).squeeze() + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = 
get(snakemake.config["sector"]["district_heating"]["progress"], investment_year) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame(dtype=float) + + df["original district heat share"] = district_heat_share + df["district fraction of node"] = dist_fraction_node + df["urban fraction"] = urban_fraction + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index f7da2a375..c00bf3b30 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -571,7 +571,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): return df -def build_district_heat_share(idees): +def build_district_heat_share(countries, idees): # district heating share district_heat = idees[ @@ -581,6 +581,8 @@ def build_district_heat_share(idees): district_heat_share = district_heat/total_heat + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] @@ -761,7 +763,7 @@ def build_transport_data(countries, population, idees): energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) - district_heat_share = build_district_heat_share(idees) + district_heat_share = build_district_heat_share(countries, idees) district_heat_share.to_csv(snakemake.output.district_heat_share) base_year_emissions = params["base_emissions_year"] diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 20467f72c..879e3b9b2 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -26,9 +26,4 @@ nodal_energy_totals.index = pop_layout.index 
nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) - # district heating share should not be divided by population fraction - dh_share = energy_totals["district heat share"].loc[pop_layout.ct].fillna(0.0) - dh_share.index = pop_layout.index - nodal_energy_totals["district heat share"] = dh_share - nodal_energy_totals.to_csv(snakemake.output[0]) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 04c123efa..a5c80caee 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1668,7 +1668,10 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, + index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1705,6 +1708,8 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 + nodes = pop_layout.index + for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" @@ -1712,8 +1717,8 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1721,9 +1726,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - bus=nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", p_nom_extendable=True, p_max_pu=0, @@ -1736,11 +1741,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting 
if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1751,7 +1756,7 @@ def add_heat(n, costs): heat_demand[[sector + " water", sector + " space"]] .T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply(factor) ) @@ -1759,7 +1764,7 @@ def add_heat(n, costs): heat_load = ( heat_demand.T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1767,9 +1772,9 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) @@ -1780,17 +1785,17 @@ def add_heat(n, costs): costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" efficiency = ( - cop[heat_pump_type][nodes[name]] + cop[heat_pump_type][nodes] if options["time_dep_hp_cop"] else costs.at[costs_name, "efficiency"] ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] @@ -1804,17 +1809,17 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + 
f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1822,9 +1827,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, @@ -1843,8 +1848,8 @@ def add_heat(n, costs): n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1858,9 +1863,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], @@ -1872,10 +1877,10 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], @@ -1889,13 +1894,13 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar 
thermal", p_nom_extendable=True, capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1903,10 +1908,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1922,12 +1927,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1959,11 +1964,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -2083,50 +2088,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # 
rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2337,7 +2298,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name 
in [ "residential rural", "services rural", @@ -2346,10 +2307,10 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] @@ -2792,7 +2753,7 @@ def add_industry(n, costs): ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2802,10 +2763,10 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], From 63f393ef2c95cde3aa25808e19d6d39593d2aa5b Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 19 Jan 2024 16:24:39 +0100 Subject: [PATCH 05/42] only add district heating (DH) for nodes with non-zero DH share --- scripts/prepare_sector_network.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index a5c80caee..3e0f388b3 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1708,11 +1708,15 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - nodes = pop_layout.index for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] 
+ else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( From 88f61220e2261d0cb758d8e0e6930f7116bbbcca Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 19 Jan 2024 18:42:49 +0100 Subject: [PATCH 06/42] move building of distribution of existing heating to own script This makes the distribution of existing heating to urban/rural, residential/services and spatially more transparent. --- rules/solve_myopic.smk | 37 ++++- scripts/add_existing_baseyear.py | 134 ++++-------------- .../build_existing_heating_distribution.py | 108 ++++++++++++++ 3 files changed, 171 insertions(+), 108 deletions(-) create mode 100644 scripts/build_existing_heating_distribution.py diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 7ca8857db..20043286f 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,8 +1,40 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +rule build_existing_heating_distribution: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_energy_layout=RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + output: + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + 
"build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + rule add_existing_baseyear: params: baseyear=config["scenario"]["planning_horizons"][0], @@ -19,7 +51,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index e7894324d..e7d766de6 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -414,97 +414,20 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
# TODO start from original file - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - - df.fillna(0.0, inplace=True) + existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, + header=[0,1], + index_col=0) - # convert GW to MW - df *= 1e3 - df.index = cc.convert(df.index, to="iso2") + techs = existing_heating.columns.get_level_values(1).unique() - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) + for name in existing_heating.columns.get_level_values(0).unique(): - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, - ) + name_type = "central" if name == "urban central" else "decentral" - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * 
ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: - name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] - else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -512,7 +435,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -525,27 +448,26 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] * ratio / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive 
heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -553,21 +475,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating[(name, "resistive heater")][nodes] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -577,8 +498,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating[(name, "gas boiler")][nodes] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -588,20 +508,20 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * 
costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom= ( + existing_heating[(name, "oil boiler")][nodes] + * ratio + / costs.at["decentral oil boiler", "efficiency"]), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -629,6 +549,8 @@ def add_heating_capacities_installed_before_baseyear( # drop assets which are at the end of their lifetime links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index + logger.info("Removing following links because at end of their lifetime:") + logger.info(links_i) n.mremove("Link", links_i) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 000000000..fe282d392 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for initial planning +horizon. 
+""" +import pandas as pd +import sys +from pypsa.descriptors import Dict +import numpy as np +import country_converter as coco + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + techs = [ + "gas boiler", + "oil boiler", + "resistive heater", + "air heat pump", + "ground heat pump", + ] + + existing_heating = pd.read_csv(snakemake.input.existing_heating, + index_col=0, + header=0) + + # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = existing_heating["oil boiler"] + existing_heating["coal boiler"] + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, + index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, + index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv(snakemake.input.clustered_pop_energy_layout, + index_col=0) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[[f"total {sector} {use}" for use in uses]].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div(nodal_sectoral_totals.sum(axis=1), + axis=0) 
+ + + nodal_heat_name_fraction = pd.DataFrame(dtype=float) + + nodal_heat_name_fraction["urban central"] = dist_fraction + + for sector in sectors: + + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*(urban_fraction - dist_fraction) + + + nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], + axis=0) for name in nodal_heat_name_fraction.columns}, + axis=1, + names=["heat name","technology"]) + + + #move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural","ground heat pump")] += (nodal_heat_name_tech[("urban central","ground heat pump")]*nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")]) + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")] = 0. + + nodal_heat_name_tech[(f"{sector} urban decentral","air heat pump")] += nodal_heat_name_tech[(f"{sector} rural","air heat pump")] + nodal_heat_name_tech[(f"{sector} rural","air heat pump")] = 0. + + nodal_heat_name_tech[("urban central","ground heat pump")] = 0. 
+ + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + + build_existing_heating() From b2539a32115368e01379d10b37bc8ae3f43c29da Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 22 Jan 2024 19:22:29 +0100 Subject: [PATCH 07/42] existing_heating: bugfix, missing Albania due to spacing error --- scripts/build_existing_heating_distribution.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index fe282d392..9f9f265cd 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -16,20 +16,13 @@ def build_existing_heating(): - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] existing_heating = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan existing_heating.loc["Montenegro"] = np.nan existing_heating.loc["Macedonia"] = np.nan From 2cc816d6d1df876fcafa33747fb2443a506f39ee Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Wed, 24 Jan 2024 13:38:39 +0100 Subject: [PATCH 08/42] solve_network: make sure infeasibilities are printed properly Without this formatting, there is an error adding a string to a list. 
--- scripts/solve_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 203d8b0fe..b3d33a270 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -889,7 +889,7 @@ def solve_network(n, config, solving, opts="", **kwargs): ) if "infeasible" in condition: labels = n.model.compute_infeasibilities() - logger.info("Labels:\n" + labels) + logger.info(f"Labels:\n{labels}") n.model.print_infeasibilities() raise RuntimeError("Solving status 'infeasible'") From 1ee4bf2ce8e8c3b3d4f0fc90a3a907f61a5bb3ba Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 18 Jan 2024 18:22:21 +0100 Subject: [PATCH 09/42] Update scripts/base_network.py Co-authored-by: Fabian Hofmann --- scripts/base_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/base_network.py b/scripts/base_network.py index eda29451f..15eddc34a 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -560,7 +560,7 @@ def prefer_voltage(x, which): ~buses["under_construction"] ) - c_nan_b = buses.country == "na" + c_nan_b = buses.country.fillna("na") == "na" if c_nan_b.sum() > 0: c_tag = _get_country(buses.loc[c_nan_b]) c_tag.loc[~c_tag.isin(countries)] = np.nan From 280781153fa6feeea905ea1fe434995a19c57730 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 29 Jan 2024 10:02:05 +0100 Subject: [PATCH 10/42] correctly source the existing heating technologies for buildings The source URL has changed. It represents the year 2012 and is only for buildings, not district heating. So the capacities for urban central are now set to zero from this source. 
--- doc/configtables/licenses-sector.csv | 2 +- scripts/add_existing_baseyear.py | 7 ------- scripts/build_existing_heating_distribution.py | 15 ++++++++++++--- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/doc/configtables/licenses-sector.csv b/doc/configtables/licenses-sector.csv index d65d3b36a..2c9775b06 100644 --- a/doc/configtables/licenses-sector.csv +++ b/doc/configtables/licenses-sector.csv @@ -11,7 +11,7 @@ BDEW heating profile,heat_load_profile_BDEW.csv,unknown,https://github.com/oemof heating profiles for Aarhus,heat_load_profile_DK_AdamJensen.csv,unknown,Adam Jensen MA thesis at Aarhus University George Lavidas wind/wave costs,WindWaveWEC_GLTB.xlsx,unknown,George Lavidas co2 budgets,co2_budget.csv,CC BY 4.0,https://arxiv.org/abs/2004.11009 -existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 +existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en IRENA existing VRE capacities,existing_infrastructure/{solar|onwind|offwind}_capcity_IRENA.csv,unknown,https://www.irena.org/Statistics/Download-Data USGS ammonia production,myb1-2017-nitro.xls,unknown,https://www.usgs.gov/centers/nmic/nitrogen-statistics-and-information hydrogen salt cavern potentials,h2_salt_caverns_GWh_per_sqkm.geojson,CC BY 4.0,https://doi.org/10.1016/j.ijhydene.2019.12.161 https://doi.org/10.20944/preprints201910.0187.v1 diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index e7d766de6..db94caca4 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -407,13 +407,6 @@ def add_heating_capacities_installed_before_baseyear( """ logger.debug(f"Adding heating capacities installed 
before {baseyear}") - # Add existing heating capacities, data comes from the study - # "Mapping and analyses of the current and future (2020 - 2030) - # heating/cooling fuel deployment (fossil/renewables) " - # https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 - # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". - # TODO start from original file - existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, header=[0,1], index_col=0) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index 9f9f265cd..c03ccb288 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -17,6 +17,14 @@ def build_existing_heating(): + # Add existing heating capacities, data comes from the study + # "Mapping and analyses of the current and future (2020 - 2030) + # heating/cooling fuel deployment (fossil/renewables) " + # https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en + # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". + # data is for buildings only (i.e. NOT district heating) and represents the year 2012 + # TODO start from original file + existing_heating = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) @@ -65,14 +73,15 @@ def build_existing_heating(): axis=0) - nodal_heat_name_fraction = pd.DataFrame(dtype=float) + nodal_heat_name_fraction = pd.DataFrame(index=district_heat_info.index, + dtype=float) - nodal_heat_name_fraction["urban central"] = dist_fraction + nodal_heat_name_fraction["urban central"] = 0. 
for sector in sectors: nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) - nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*(urban_fraction - dist_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*urban_fraction nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], From 8db560389e98bff2a44938f6307716e2a48b9b69 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 29 Jan 2024 11:25:09 +0100 Subject: [PATCH 11/42] for existing heating use new default_heating_lifetime This is because old costs default (25) is longer than all heating technologies (20). Script was distributing across 25 years, then throwing out boilers older than 20 years, an inconsistent behaviour. Now existing boilers are smoothly distributed across 20 years. --- config/config.default.yaml | 1 + scripts/add_existing_baseyear.py | 10 ++-------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index 7f1f20349..2743f8dc3 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -345,6 +345,7 @@ existing_capacities: grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020 threshold_capacity: 10 + default_heating_lifetime: 20 conventional_carriers: - lignite - coal diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index db94caca4..a943d7645 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -436,7 +436,7 @@ def add_heating_capacities_installed_before_baseyear( if int(grouping_year) + default_lifetime <= int(baseyear): continue - # installation is assumed to be linear for the past 25 years (default lifetime) + # installation is assumed to be 
linear for the past default_lifetime years ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime n.madd( @@ -540,12 +540,6 @@ def add_heating_capacities_installed_before_baseyear( ], ) - # drop assets which are at the end of their lifetime - links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index - logger.info("Removing following links because at end of their lifetime:") - logger.info(links_i) - n.mremove("Link", links_i) - if __name__ == "__main__": if "snakemake" not in globals(): @@ -602,7 +596,7 @@ def add_heating_capacities_installed_before_baseyear( .to_pandas() .reindex(index=n.snapshots) ) - default_lifetime = snakemake.params.costs["fill_values"]["lifetime"] + default_lifetime = snakemake.params.existing_capacities["default_heating_lifetime"] add_heating_capacities_installed_before_baseyear( n, baseyear, From 879e94c80f6cf5cb11dcd8de0a64c16647194fd8 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 29 Jan 2024 12:22:54 +0100 Subject: [PATCH 12/42] add new default to overdimension heating in individual buildings This allows them to cover heat demand peaks e.g. 10% higher than those in the data. The disadvantage of manipulating the costs is that the capacity is then not quite right. This way at least the costs are right. Doing it properly would require introducing artificial peaks, but this creates new problems (e.g. what is going on with wind/solar/other demand). 
--- config/config.default.yaml | 1 + scripts/prepare_sector_network.py | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index 2743f8dc3..45690e410 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -449,6 +449,7 @@ sector: boilers: true oil_boilers: false biomass_boiler: true + overdimension_individual_heating: 1.1 #to cover demand peaks bigger than data chp: true micro_chp: false solar_thermal: true diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 3e0f388b3..3ecb43dd6 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1668,6 +1668,8 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) + overdim_factor = options["overdimension_individual_heating"] + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] @@ -1803,7 +1805,7 @@ def add_heat(n, costs): carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] - * costs.at[costs_name, "fixed"], + * costs.at[costs_name, "fixed"] * overdim_factor, p_nom_extendable=True, lifetime=costs.at[costs_name, "lifetime"], ) @@ -1872,7 +1874,8 @@ def add_heat(n, costs): bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] * overdim_factor, p_nom_extendable=True, lifetime=costs.at[key, "lifetime"], ) @@ -1889,7 +1892,8 @@ def add_heat(n, costs): carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], efficiency2=costs.at["gas", "CO2 intensity"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, 
"fixed"] * overdim_factor, lifetime=costs.at[key, "lifetime"], ) @@ -1903,7 +1907,8 @@ def add_heat(n, costs): bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, - capital_cost=costs.at[name_type + " solar thermal", "fixed"], + capital_cost=costs.at[name_type + " solar thermal", "fixed"] + * overdim_factor, p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -2318,7 +2323,8 @@ def add_biomass(n, costs): carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] - * costs.at["biomass boiler", "fixed"], + * costs.at["biomass boiler", "fixed"] + * options["overdimension_individual_heating"], marginal_cost=costs.at["biomass boiler", "pelletizing cost"], lifetime=costs.at["biomass boiler", "lifetime"], ) @@ -2776,7 +2782,8 @@ def add_industry(n, costs): efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] - * costs.at["decentral oil boiler", "fixed"], + * costs.at["decentral oil boiler", "fixed"] + * options["overdimension_individual_heating"], lifetime=costs.at["decentral oil boiler", "lifetime"], ) From 29e09789096bf5eba7136040763a78715b738194 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 29 Jan 2024 18:32:08 +0100 Subject: [PATCH 13/42] separate domestic and international aviation This is required so that other models can do CO2 limits on each type of aviation separately. 
--- scripts/prepare_sector_network.py | 55 +++++++++++++++++-------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 3ecb43dd6..aa2e2906d 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -2868,7 +2868,7 @@ def add_industry(n, costs): p_set = ( demand_factor - * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1) + * pop_weighted_energy_totals.loc[nodes, all_aviation] * 1e6 / nhours ).rename(lambda x: x + " kerosene for aviation") @@ -2876,32 +2876,37 @@ def add_industry(n, costs): if not options["regional_oil_demand"]: p_set = p_set.sum() - n.madd( - "Bus", - spatial.oil.kerosene, - location=spatial.oil.demand_locations, - carrier="kerosene for aviation", - unit="MWh_LHV", - ) + for scope in ["domestic", "international"]: - n.madd( - "Load", - spatial.oil.kerosene, - bus=spatial.oil.kerosene, - carrier="kerosene for aviation", - p_set=p_set, - ) + n.madd( + "Bus", + spatial.oil.kerosene, + suffix=f" {scope}", + location=spatial.oil.demand_locations, + carrier=f"kerosene for aviation {scope}", + unit="MWh_LHV", + ) - n.madd( - "Link", - spatial.oil.kerosene, - bus0=spatial.oil.nodes, - bus1=spatial.oil.kerosene, - bus2="co2 atmosphere", - carrier="kerosene for aviation", - p_nom_extendable=True, - efficiency2=costs.at["oil", "CO2 intensity"], - ) + n.madd( + "Load", + spatial.oil.kerosene, + suffix=f" {scope}", + bus=spatial.oil.kerosene + f" {scope}", + carrier=f"kerosene for aviation {scope}", + p_set=p_set[f"total {scope} aviation"], + ) + + n.madd( + "Link", + spatial.oil.kerosene, + suffix=f" {scope}", + bus0=spatial.oil.nodes, + bus1=spatial.oil.kerosene + f" {scope}", + bus2="co2 atmosphere", + carrier=f"kerosene for aviation {scope}", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], + ) # TODO simplify bus expression n.madd( From b1ba64a84b184a5f26c6f30e094d503e2397a378 Mon Sep 17 
00:00:00 2001 From: Tom Brown Date: Mon, 29 Jan 2024 18:36:17 +0100 Subject: [PATCH 14/42] add colours for domestic and international aviation --- config/config.default.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/config.default.yaml b/config/config.default.yaml index 45690e410..98e7fb228 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -976,6 +976,8 @@ plotting: Fischer-Tropsch: '#25c49a' liquid: '#25c49a' kerosene for aviation: '#a1ffe6' + kerosene for aviation international: '#a1ffe6' + kerosene for aviation domestic: '#90ccc6' naphtha for industry: '#57ebc4' methanolisation: '#83d6d5' methanol: '#468c8b' From e9be2032d37f3848d49de8176ef24c203a988c3d Mon Sep 17 00:00:00 2001 From: Michael Lindner Date: Tue, 30 Jan 2024 14:53:35 +0100 Subject: [PATCH 15/42] fix whitespace --- rules/build_sector.smk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index e1753a2b0..ed89f74b5 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -798,7 +798,7 @@ rule prepare_sector_network: industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", From 276a79c3473a43849b17dec0db902304a5b3f4ec Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Thu, 1 Feb 2024 16:19:16 +0100 Subject: [PATCH 16/42] Revert "add colours for domestic and international aviation" This reverts commit 
b1ba64a84b184a5f26c6f30e094d503e2397a378. --- config/config.default.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index 98e7fb228..45690e410 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -976,8 +976,6 @@ plotting: Fischer-Tropsch: '#25c49a' liquid: '#25c49a' kerosene for aviation: '#a1ffe6' - kerosene for aviation international: '#a1ffe6' - kerosene for aviation domestic: '#90ccc6' naphtha for industry: '#57ebc4' methanolisation: '#83d6d5' methanol: '#468c8b' From dcae9e19056e20c5169cf990c0ea08e3dfa23c10 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Thu, 1 Feb 2024 16:19:24 +0100 Subject: [PATCH 17/42] Revert "separate domestic and international aviation" This reverts commit 29e09789096bf5eba7136040763a78715b738194. --- scripts/prepare_sector_network.py | 55 ++++++++++++++----------------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index aa2e2906d..3ecb43dd6 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -2868,7 +2868,7 @@ def add_industry(n, costs): p_set = ( demand_factor - * pop_weighted_energy_totals.loc[nodes, all_aviation] + * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1) * 1e6 / nhours ).rename(lambda x: x + " kerosene for aviation") @@ -2876,37 +2876,32 @@ def add_industry(n, costs): if not options["regional_oil_demand"]: p_set = p_set.sum() - for scope in ["domestic", "international"]: - - n.madd( - "Bus", - spatial.oil.kerosene, - suffix=f" {scope}", - location=spatial.oil.demand_locations, - carrier=f"kerosene for aviation {scope}", - unit="MWh_LHV", - ) + n.madd( + "Bus", + spatial.oil.kerosene, + location=spatial.oil.demand_locations, + carrier="kerosene for aviation", + unit="MWh_LHV", + ) - n.madd( - "Load", - spatial.oil.kerosene, - suffix=f" {scope}", - bus=spatial.oil.kerosene + f" {scope}", - 
carrier=f"kerosene for aviation {scope}", - p_set=p_set[f"total {scope} aviation"], - ) + n.madd( + "Load", + spatial.oil.kerosene, + bus=spatial.oil.kerosene, + carrier="kerosene for aviation", + p_set=p_set, + ) - n.madd( - "Link", - spatial.oil.kerosene, - suffix=f" {scope}", - bus0=spatial.oil.nodes, - bus1=spatial.oil.kerosene + f" {scope}", - bus2="co2 atmosphere", - carrier=f"kerosene for aviation {scope}", - p_nom_extendable=True, - efficiency2=costs.at["oil", "CO2 intensity"], - ) + n.madd( + "Link", + spatial.oil.kerosene, + bus0=spatial.oil.nodes, + bus1=spatial.oil.kerosene, + bus2="co2 atmosphere", + carrier="kerosene for aviation", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], + ) # TODO simplify bus expression n.madd( From cc299f969e1535082bcf668e4afe206c57c8a920 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 2 Feb 2024 10:07:05 +0100 Subject: [PATCH 18/42] pre-commit formatting [no ci] --- rules/build_sector.smk | 11 ++- rules/common.smk | 2 +- rules/solve_myopic.smk | 6 +- scripts/add_brownfield.py | 4 +- scripts/add_existing_baseyear.py | 21 +++-- scripts/build_district_heat_share.py | 18 ++-- scripts/build_energy_totals.py | 16 ++-- .../build_existing_heating_distribution.py | 91 +++++++++++-------- scripts/build_hourly_heat_demand.py | 17 ++-- scripts/prepare_sector_network.py | 26 +++--- scripts/solve_network.py | 2 +- 11 files changed, 117 insertions(+), 97 deletions(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index ed89f74b5..316134866 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -726,8 +726,6 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" - - rule build_district_heat_share: params: sector=config["sector"], @@ -735,7 +733,8 @@ rule build_district_heat_share: district_heat_share=RESOURCES + "district_heat_share.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", output: - district_heat_share=RESOURCES + 
"district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", threads: 1 resources: mem_mb=1000, @@ -797,8 +796,10 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + hourly_heat_demand_total=RESOURCES + + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/rules/common.smk b/rules/common.smk index 0e85b620e..2298ff912 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -4,7 +4,7 @@ import os, sys, glob -helper_source_path = [match for match in glob.glob('**/_helpers.py', recursive=True)] +helper_source_path = [match for match in glob.glob("**/_helpers.py", recursive=True)] for path in helper_source_path: path = os.path.dirname(os.path.abspath(path)) diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 20043286f..743c6f69a 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -11,8 +11,10 @@ rule build_existing_heating_distribution: input: existing_heating="data/existing_infrastructure/existing_heating_raw.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - clustered_pop_energy_layout=RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - district_heat_share=RESOURCES + 
"district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + clustered_pop_energy_layout=RESOURCES + + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", output: existing_heating_distribution=RESOURCES + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index e151c4416..cb1f51c8b 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -133,9 +133,7 @@ def disable_grid_expansion_if_LV_limit_hit(n): # allow small numerical differences if lv_limit - total_expansion < 1: - logger.info( - f"LV is already reached, disabling expansion and LV limit" - ) + logger.info(f"LV is already reached, disabling expansion and LV limit") extendable_acs = n.lines.query("s_nom_extendable").index n.lines.loc[extendable_acs, "s_nom_extendable"] = False n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"] diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index a943d7645..cc65f254d 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -407,15 +407,13 @@ def add_heating_capacities_installed_before_baseyear( """ logger.debug(f"Adding heating capacities installed before {baseyear}") - existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, - header=[0,1], - index_col=0) - + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 + ) techs = existing_heating.columns.get_level_values(1).unique() for name in existing_heating.columns.get_level_values(0).unique(): - name_type = "central" if name == "urban central" else "decentral" nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) @@ -449,7 +447,9 @@ def add_heating_capacities_installed_before_baseyear( 
efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) @@ -511,10 +511,11 @@ def add_heating_capacities_installed_before_baseyear( efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom= ( + p_nom=( existing_heating[(name, "oil boiler")][nodes] * ratio - / costs.at["decentral oil boiler", "efficiency"]), + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -596,7 +597,9 @@ def add_heating_capacities_installed_before_baseyear( .to_pandas() .reindex(index=n.snapshots) ) - default_lifetime = snakemake.params.existing_capacities["default_heating_lifetime"] + default_lifetime = snakemake.params.existing_capacities[ + "default_heating_lifetime" + ] add_heating_capacities_installed_before_baseyear( n, baseyear, diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index d521214da..398083e79 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -6,12 +6,10 @@ Build district heat shares at each node, depending on investment year. 
""" -import pandas as pd - -from prepare_sector_network import get - import logging +import pandas as pd +from prepare_sector_network import get logger = logging.getLogger(__name__) @@ -28,11 +26,11 @@ investment_year = int(snakemake.wildcards.planning_horizons[-4:]) - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - district_heat_share = pd.read_csv(snakemake.input.district_heat_share, - index_col=0).squeeze() + district_heat_share = pd.read_csv( + snakemake.input.district_heat_share, index_col=0 + ).squeeze() # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] @@ -61,7 +59,9 @@ # difference of max potential and today's share of district heating diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(snakemake.config["sector"]["district_heating"]["progress"], investment_year) + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) dist_fraction_node += diff * progress logger.info( f"Increase district heating share by a progress factor of {progress:.2%} " diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index c00bf3b30..c92f8e64b 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -572,14 +572,15 @@ def build_energy_totals(countries, eurostat, swiss, idees): def build_district_heat_share(countries, idees): - # district heating share - district_heat = idees[ - ["derived heat residential", "derived heat services"] - ].sum(axis=1) - total_heat = idees[["thermal uses residential", "thermal uses services"]].sum(axis=1) + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) - district_heat_share = district_heat/total_heat + district_heat_share = district_heat / 
total_heat district_heat_share = district_heat_share.reindex(countries) @@ -589,7 +590,8 @@ def build_district_heat_share(countries, idees): ) # make conservative assumption and take minimum from both data sets district_heat_share = pd.concat( - [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], axis=1 + [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], + axis=1, ).min(axis=1) district_heat_share.name = "district heat share" diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index c03ccb288..50aea076f 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -6,17 +6,17 @@ Builds table of existing heat generation capacities for initial planning horizon. """ -import pandas as pd import sys -from pypsa.descriptors import Dict -import numpy as np + import country_converter as coco +import numpy as np +import pandas as pd +from pypsa.descriptors import Dict cc = coco.CountryConverter() def build_existing_heating(): - # Add existing heating capacities, data comes from the study # "Mapping and analyses of the current and future (2020 - 2030) # heating/cooling fuel deployment (fossil/renewables) " @@ -25,9 +25,9 @@ def build_existing_heating(): # data is for buildings only (i.e. 
NOT district heating) and represents the year 2012 # TODO start from original file - existing_heating = pd.read_csv(snakemake.input.existing_heating, - index_col=0, - header=0) + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan @@ -42,24 +42,25 @@ def build_existing_heating(): existing_heating.index = cc.convert(existing_heating.index, to="iso2") # coal and oil boilers are assimilated to oil boilers - existing_heating["oil boiler"] = existing_heating["oil boiler"] + existing_heating["coal boiler"] + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) existing_heating.drop(["coal boiler"], axis=1, inplace=True) # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) nodal_heating = existing_heating.loc[pop_layout.ct] nodal_heating.index = pop_layout.index nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) - district_heat_info = pd.read_csv(snakemake.input.district_heat_share, - index_col=0) + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] urban_fraction = district_heat_info["urban fraction"] - energy_layout = pd.read_csv(snakemake.input.clustered_pop_energy_layout, - index_col=0) + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) uses = ["space", "water"] sectors = ["residential", "services"] @@ -67,44 +68,54 @@ def build_existing_heating(): nodal_sectoral_totals = pd.DataFrame(dtype=float) for sector in sectors: - nodal_sectoral_totals[sector] = energy_layout[[f"total {sector} {use}" for use in uses]].sum(axis=1) + nodal_sectoral_totals[sector] = 
energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) - nodal_sectoral_fraction = nodal_sectoral_totals.div(nodal_sectoral_totals.sum(axis=1), - axis=0) + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) + nodal_heat_name_fraction = pd.DataFrame(index=district_heat_info.index, dtype=float) - nodal_heat_name_fraction = pd.DataFrame(index=district_heat_info.index, - dtype=float) - - nodal_heat_name_fraction["urban central"] = 0. + nodal_heat_name_fraction["urban central"] = 0.0 for sector in sectors: - - nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) - nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*urban_fraction - - - nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], - axis=0) for name in nodal_heat_name_fraction.columns}, - axis=1, - names=["heat name","technology"]) - - - #move all ground HPs to rural, all air to urban + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = ( + nodal_sectoral_fraction[sector] * urban_fraction + ) + + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) + + # move all ground HPs to rural, all air to urban for sector in sectors: - nodal_heat_name_tech[(f"{sector} rural","ground heat pump")] += (nodal_heat_name_tech[("urban central","ground heat pump")]*nodal_sectoral_fraction[sector] - + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")]) - nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")] = 0. 
+ nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 - nodal_heat_name_tech[(f"{sector} urban decentral","air heat pump")] += nodal_heat_name_tech[(f"{sector} rural","air heat pump")] - nodal_heat_name_tech[(f"{sector} rural","air heat pump")] = 0. + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 - nodal_heat_name_tech[("urban central","ground heat pump")] = 0. + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) if __name__ == "__main__": - build_existing_heating() diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py index 94ad72664..69706606b 100644 --- a/scripts/build_hourly_heat_demand.py +++ b/scripts/build_hourly_heat_demand.py @@ -6,12 +6,11 @@ Build hourly heat demand time series from daily ones. 
""" +from itertools import product + import pandas as pd import xarray as xr from _helpers import generate_periodic_profiles, update_config_with_sector_opts -from itertools import product - - if __name__ == "__main__": if "snakemake" not in globals(): @@ -48,21 +47,21 @@ ) if use == "space": - heat_demand[f"{sector} {use}"] = daily_space_heat_demand * intraday_year_profile + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) else: heat_demand[f"{sector} {use}"] = intraday_year_profile - heat_demand = pd.concat(heat_demand, - axis=1, - names = ["sector use", "node"]) + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) - heat_demand.index.name="snapshots" + heat_demand.index.name = "snapshots" print(heat_demand) print(heat_demand.stack()) - ds = heat_demand.stack().to_xarray()#xr.Dataset.from_dataframe(heat_demand) + ds = heat_demand.stack().to_xarray() # xr.Dataset.from_dataframe(heat_demand) print(ds) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 3ecb43dd6..01cae430d 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1629,8 +1629,11 @@ def add_land_transport(n, costs): def build_heat_demand(n): - - heat_demand_shape = xr.open_dataset(snakemake.input.hourly_heat_demand_total).to_dataframe().unstack(level=1) + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) + ) sectors = ["residential", "services"] uses = ["water", "space"] @@ -1638,7 +1641,6 @@ def build_heat_demand(n): heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - name = f"{sector} {use}" heat_demand[name] = ( @@ -1670,8 +1672,7 @@ def add_heat(n, costs): overdim_factor = options["overdimension_individual_heating"] - district_heat_info = pd.read_csv(snakemake.input.district_heat_share, - index_col=0) + district_heat_info = 
pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] urban_fraction = district_heat_info["urban fraction"] @@ -1710,7 +1711,6 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" @@ -1805,7 +1805,8 @@ def add_heat(n, costs): carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] - * costs.at[costs_name, "fixed"] * overdim_factor, + * costs.at[costs_name, "fixed"] + * overdim_factor, p_nom_extendable=True, lifetime=costs.at[costs_name, "lifetime"], ) @@ -1875,7 +1876,8 @@ def add_heat(n, costs): carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], capital_cost=costs.at[key, "efficiency"] - * costs.at[key, "fixed"] * overdim_factor, + * costs.at[key, "fixed"] + * overdim_factor, p_nom_extendable=True, lifetime=costs.at[key, "lifetime"], ) @@ -1893,7 +1895,8 @@ def add_heat(n, costs): efficiency=costs.at[key, "efficiency"], efficiency2=costs.at["gas", "CO2 intensity"], capital_cost=costs.at[key, "efficiency"] - * costs.at[key, "fixed"] * overdim_factor, + * costs.at[key, "fixed"] + * overdim_factor, lifetime=costs.at[key, "lifetime"], ) @@ -2953,8 +2956,9 @@ def add_industry(n, costs): if options["co2_spatial"] or options["co2network"]: p_set = ( - -industrial_demand.loc[nodes, "process emission"] - .rename(index=lambda x: x + " process emissions") + -industrial_demand.loc[nodes, "process emission"].rename( + index=lambda x: x + " process emissions" + ) / nhours ) else: diff --git a/scripts/solve_network.py b/scripts/solve_network.py index b3d33a270..24c1b63ac 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -856,7 +856,7 @@ def solve_network(n, config, solving, opts="", **kwargs): 
kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False) if kwargs["solver_name"] == "gurobi": - logging.getLogger('gurobipy').setLevel(logging.CRITICAL) + logging.getLogger("gurobipy").setLevel(logging.CRITICAL) rolling_horizon = cf_solving.pop("rolling_horizon", False) skip_iterations = cf_solving.pop("skip_iterations", False) From 7c0c843d80ec46421ac97370669b6899d0b1736a Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 2 Feb 2024 10:08:01 +0100 Subject: [PATCH 19/42] update git-blame-ignore-revs to exclude autoformat --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b64..564d78ff8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +cc299f969e1535082bcf668e4afe206c57c8a920 From 747dd527aad00f1a704c81910137bca179977267 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 14:31:10 +0100 Subject: [PATCH 20/42] cluster_gas_network: generalise so it can be used elsewhere --- scripts/cluster_gas_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/cluster_gas_network.py b/scripts/cluster_gas_network.py index e7554dff3..4b88ef308 100755 --- a/scripts/cluster_gas_network.py +++ b/scripts/cluster_gas_network.py @@ -75,10 +75,10 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): return df -def reindex_pipes(df): +def reindex_pipes(df, prefix="gas pipeline"): def make_index(x): connector = " <-> " if x.bidirectional else " -> " - return "gas pipeline " + x.bus0 + connector + x.bus1 + return prefix + " " + x.bus0 + connector + x.bus1 df.index = df.apply(make_index, axis=1) From 58806423867390d62f7ee80617b1a63ffdafa89e Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 5 Feb 2024 18:04:30 +0100 Subject: [PATCH 21/42] allow mock_snakemake to 
run in submodule setup --- scripts/_helpers.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 9945f70f0..c574d5ee1 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -193,7 +193,13 @@ def update_to(b=1, bsize=1, tsize=None): urllib.request.urlretrieve(url, file, reporthook=update_to) -def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): +def mock_snakemake( + rulename, + root_dir=None, + configfiles=[], + submodule_dir="workflow/submodules/pypsa-eur", + **wildcards, +): """ This function is expected to be executed from the 'scripts'-directory of ' the snakemake project. It returns a snakemake.script.Snakemake object, @@ -209,6 +215,9 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): path to the root directory of the snakemake project configfiles: list, str list of configfiles to be used to update the config + submodule_dir: str, Path + in case PyPSA-Eur is used as a submodule, submodule_dir is + the path of pypsa-eur relative to the project directory. **wildcards: keyword arguments fixing the wildcards. Only necessary if wildcards are needed. 
@@ -227,7 +236,10 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards): root_dir = Path(root_dir).resolve() user_in_script_dir = Path.cwd().resolve() == script_dir - if user_in_script_dir: + if str(submodule_dir) in __file__: + # the submodule_dir path is only need to locate the project dir + os.chdir(Path(__file__[: __file__.find(str(submodule_dir))])) + elif user_in_script_dir: os.chdir(root_dir) elif Path.cwd().resolve() != root_dir: raise RuntimeError( From f1420e6b688cdda24b365b75c7a35b5579e6c58d Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 5 Feb 2024 18:05:32 +0100 Subject: [PATCH 22/42] lossy_bidirectional_links: allow to specify subset of links --- scripts/prepare_sector_network.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 01cae430d..7e2cd649f 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -3481,10 +3481,12 @@ def set_temporal_aggregation(n, opts, solver_name): return n -def lossy_bidirectional_links(n, carrier, efficiencies={}): +def lossy_bidirectional_links(n, carrier, efficiencies={}, subset=None): "Split bidirectional links into two unidirectional links to include transmission losses." 
- carrier_i = n.links.query("carrier == @carrier").index + if subset is None: + subset = n.links.index + carrier_i = n.links.query("carrier == @carrier").index.intersection(subset) if ( not any((v != 1.0) or (v >= 0) for v in efficiencies.values()) From f12452089e2590362393149ca4c5c336302f9064 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Wed, 7 Feb 2024 17:53:10 +0100 Subject: [PATCH 23/42] add logs to plot_*_network rules --- rules/postprocess.smk | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 2e90d2333..18d65c163 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -41,6 +41,11 @@ if config["foresight"] != "perfect": threads: 2 resources: mem_mb=10000, + log: + ( + LOGS + + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ) benchmark: ( BENCHMARKS @@ -65,6 +70,11 @@ if config["foresight"] != "perfect": threads: 2 resources: mem_mb=10000, + log: + ( + LOGS + + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ) benchmark: ( BENCHMARKS @@ -88,6 +98,11 @@ if config["foresight"] != "perfect": threads: 2 resources: mem_mb=10000, + log: + ( + LOGS + + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ) benchmark: ( BENCHMARKS From 70f4f1d2ceefdb376cd4431226346d7354167d80 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Wed, 7 Feb 2024 18:50:20 +0100 Subject: [PATCH 24/42] fix wrong merge decision - code block was removed on purpose --- scripts/add_existing_baseyear.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 8852e58c7..442009ce5 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -543,12 +543,6 @@ def add_heating_capacities_installed_before_baseyear( ], ) - # drop assets which are at the end of their 
lifetime - links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index - logger.info("Removing following links because at end of their lifetime:") - logger.info(links_i) - n.mremove("Link", links_i) - if __name__ == "__main__": if "snakemake" not in globals(): From e2bd3926b39396825c2dcc7f10edcf791dfe4ae9 Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Fri, 9 Feb 2024 13:47:24 +0100 Subject: [PATCH 25/42] implement statistics post processing for csv creation and plotting. --- config/config.default.yaml | 9 ++- rules/collect.smk | 19 +++++ rules/postprocess.smk | 89 ++++++++++++++++++++- scripts/make_statistics_csv.py | 98 +++++++++++++++++++++++ scripts/plot_statistics_comparison.py | 109 ++++++++++++++++++++++++++ scripts/plot_statistics_single.py | 79 +++++++++++++++++++ 6 files changed, 400 insertions(+), 3 deletions(-) create mode 100644 scripts/make_statistics_csv.py create mode 100644 scripts/plot_statistics_comparison.py create mode 100644 scripts/plot_statistics_single.py diff --git a/config/config.default.yaml b/config/config.default.yaml index 93b0568e3..5101b66e1 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -44,7 +44,7 @@ scenario: opts: - '' sector_opts: - - Co2L0-3H-T-H-B-I-A-dist1 + - Co2L0-3h-T-H-B-I-A-dist1 planning_horizons: # - 2020 # - 2030 @@ -813,6 +813,13 @@ plotting: energy_max: 20000 energy_min: -20000 energy_threshold: 50. 
+ countries: + - all + carriers: + - electricity + - heat + carrier_groups: + electricity: [AC, low_voltage] nice_names: OCGT: "Open-Cycle Gas" diff --git a/rules/collect.smk b/rules/collect.smk index dc0a94cc6..caa46e9bc 100644 --- a/rules/collect.smk +++ b/rules/collect.smk @@ -81,3 +81,22 @@ rule validate_elec_networks: **config["scenario"], kind=["production", "prices", "cross_border"], ), + + +rule plot_statistics: + input: + [ + expand( + RESULTS + + "statistics/figures/comparison/country_{country}/.statistics_{carrier}_plots", + country=config["plotting"].get("countries", "all"), + carrier=config["plotting"].get("carriers", ["all"]), + ), + expand( + RESULTS + + "statistics/figures/single/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/.statistics_{carrier}_plots", + **config["scenario"], + country=config["plotting"].get("countries", "all"), + carrier=config["plotting"].get("carriers", ["all"]), + ), + ], diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 2e90d2333..ce3141d8e 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -257,14 +257,99 @@ STATISTICS_BARPLOTS = [ "capacity_factor", "installed_capacity", "optimal_capacity", - "capital_expenditure", - "operational_expenditure", + "capex", + "opex", "curtailment", "supply", "withdrawal", "market_value", + "energy_balance", ] +STATISTICS_CSV = [ + "capacity_factor", + "installed_capacity", + "optimal_capacity", + "capex", + "opex", + "curtailment", + "supply", + "withdrawal", + "market_value", +] + + +rule save_statistics_csv: + params: + barplots=STATISTICS_CSV, + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + output: + **{ + f"{csv}": RESULTS + + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_" + + f"{csv}.csv" + for carrier in config["plotting"].get("carriers", "all") + for csv in 
STATISTICS_CSV + }, + csv_touch=RESULTS + + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/.statistics_{carrier}_csv", + script: + "../scripts/make_statistics_csv.py" + + +rule plot_statistics_single: + params: + plotting=config["plotting"], + barplots=STATISTICS_BARPLOTS, + input: + **{ + f"{csv}": RESULTS + + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_" + + f"{csv}.csv" + for carrier in config["plotting"].get("carriers", "all") + for csv in STATISTICS_CSV + }, + output: + **{ + f"{plot}": RESULTS + + "statistics/figures/single/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_" + + f"{plot}.pdf" + for carrier in config["plotting"].get("carriers", "all") + for plot in STATISTICS_BARPLOTS + }, + barplots_touch=RESULTS + + "statistics/figures/single/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/.statistics_{carrier}_plots", + script: + "../scripts/plot_statistics_single.py" + + +rule plot_statistics_comparison: + params: + plotting=config["plotting"], + barplots=STATISTICS_BARPLOTS, + input: + expand( + RESULTS + + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_{csv}.csv", + **config["scenario"], + csv=STATISTICS_CSV, + allow_missing=True, + ), + output: + **{ + f"{plot}": RESULTS + + "statistics/figures/comparison/country_{country}/{carrier}_" + + f"{plot}.pdf" + for carrier in config["plotting"].get("carriers", "all") + for plot in STATISTICS_BARPLOTS + }, + barplots_touch=RESULTS + + "statistics/figures/comparison/country_{country}/.statistics_{carrier}_plots", + script: + "../scripts/plot_statistics_comparison.py" + rule plot_elec_statistics: params: diff --git a/scripts/make_statistics_csv.py b/scripts/make_statistics_csv.py new file mode 100644 index 000000000..7dfa84044 
--- /dev/null +++ b/scripts/make_statistics_csv.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +import seaborn as sns +from _helpers import configure_logging +from pypsa.statistics import get_carrier + + +# grouperfunctions = hier schreiben und dann in statistics. +def groupby_country_and_carrier(n, c, nice_names=False): + df = n.df(c) + bus = "bus1" if "bus" not in n.df(c) else "bus" + country = df[bus].map(n.buses.location).map(n.buses.country).rename("country") + carrier = get_carrier(n, c, nice_names) + return [country, carrier] + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "save_statistics_csv", + simpl="", + ll="v1.5", + clusters="5", + opts="", + sector_opts="24h-T-H-B-I-A-dist1", + planning_horizons="2040", + country="all", + carrier="electricity", + ) + # configure_logging(snakemake) + config = snakemake.config + + n = pypsa.Network(snakemake.input.network) + + kwargs = {"nice_names": False} + + wildcards = dict(snakemake.wildcards) + carrier = wildcards.pop("carrier") + country = wildcards.pop("country") + + if carrier == "all": + pass + elif carrier in config["plotting"].get("carrier_groups", []): + bus_carrier = config["plotting"]["carrier_groups"][carrier] + kwargs["bus_carrier"] = bus_carrier + elif n.buses.carrier.str.contains(carrier).any(): + if carrier not in n.buses.carrier.unique(): + carrier = [ + bus_carrier + for bus_carrier in n.buses.carrier.unique() + if carrier in bus_carrier + ] + bus_carrier = carrier + kwargs["bus_carrier"] = bus_carrier + else: + raise ValueError( + f"Carrier {carrier} not found in network or carrier group in config." 
+ ) + + if country == "all" or not country: + kwargs["groupby"] = get_carrier + else: + kwargs["groupby"] = groupby_country_and_carrier + + for output in snakemake.output.keys(): + if "touch" in output: + continue + if output != "energy_balance": + func = eval(f"n.statistics.{output}") + try: + ds = func(**kwargs) + if ds.empty: + print( + f"Empty series for {output} with bus carrier {bus_carrier} and country {country}." + ) + pd.Series().to_csv(snakemake.output[output]) + continue + except Exception as e: + print(f"An error occurred: {e}") + pd.Series().to_csv(snakemake.output[output]) + pass + continue + if country and country != "all": + ds = ds.xs(country, level="country") + ds.name = ds.attrs["name"] + ds.dropna().to_csv(snakemake.output[output]) + # touch file + with open(snakemake.output.csv_touch, "a"): + pass diff --git a/scripts/plot_statistics_comparison.py b/scripts/plot_statistics_comparison.py new file mode 100644 index 000000000..73776dd51 --- /dev/null +++ b/scripts/plot_statistics_comparison.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +import re + +import matplotlib.pyplot as plt +import matplotlib.ticker as ticker +import pandas as pd +import seaborn as sns +from _helpers import configure_logging +from plot_summary import rename_techs + +sns.set_theme("paper", style="whitegrid") +STACKED = { + "capacity_factor": False, + "market_value": False, +} + + +def rename_index(df): + # rename index and drop duplicates + specific = df.index.map(lambda x: f"{x[1]}({x[0]})") + generic = df.index.get_level_values("carrier") + duplicated = generic.duplicated(keep=False) + index = specific.where(duplicated, generic) + df = df.set_axis(index) + # rename columns and drop duplicates + columns = df.columns.str.split("_", expand=True) + columns = [ + columns.get_level_values(level).unique() + for level in range(columns.nlevels) + if not 
columns.get_level_values(level).duplicated(keep=False).all() + ] + columns = pd.MultiIndex.from_arrays(columns) + df.columns = columns.map("\n".join) + return df + + +def plot_static_comparison(df, ax, stacked=False): + df = df[df != 0] + df = df.dropna(axis=0, how="all").fillna(0) + c = tech_colors[df.index.get_level_values("carrier").map(rename_techs)] + df = df.pipe(rename_index).T + df.plot.bar(color=c.values, ax=ax, stacked=stacked, legend=False) + ax.legend( + bbox_to_anchor=(1, 1), + loc="upper left", + ) + ax.grid(axis="x") + + +def read_csv(input, output): + try: + files = list(filter(lambda x: output in x, input)) + pattern = r"elec_.*?(\d{4})" + network_labels = [re.search(pattern, f).group() for f in files] + df = pd.concat( + [pd.read_csv(f).set_index(["component", "carrier"]) for f in files], + axis=1, + keys=network_labels, + ) + # get plot label and drop from index + label = df.columns.get_level_values(1).unique()[0] + df.columns = df.columns.droplevel(1) + except Exception as e: + print(f"Error reading csv file for {output}: {e}") + df = pd.DataFrame() + return df + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_statistics_comparison", + country="all", + carrier="electricity", + ) + configure_logging(snakemake) + + tech_colors = pd.Series(snakemake.params.plotting["tech_colors"]) + + for output in snakemake.output.keys(): + if "touch" in output: + with open(snakemake.output.barplots_touch, "a"): + pass + continue + fig, ax = plt.subplots() + if "energy_balance" not in output: + df = read_csv(snakemake.input, output) + else: + supply = read_csv(snakemake.input, "supply") + withdrawal = read_csv(snakemake.input, "withdrawal") + df = ( + pd.concat([supply, withdrawal.mul(-1)]) + .groupby(["component", "carrier"]) + .sum() + ) + if df.empty: + fig.savefig(snakemake.output[output]) + continue + + plot_static_comparison(df, ax, stacked=STACKED.get(output, 
True)) + + fig.savefig(snakemake.output[output], bbox_inches="tight") diff --git a/scripts/plot_statistics_single.py b/scripts/plot_statistics_single.py new file mode 100644 index 000000000..dfb709509 --- /dev/null +++ b/scripts/plot_statistics_single.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +import matplotlib.pyplot as plt +import pandas as pd +import seaborn as sns +from _helpers import configure_logging +from plot_summary import rename_techs +from pypsa.statistics import get_carrier + +sns.set_theme("paper", style="whitegrid") + + +def rename_index(ds): + specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})") + generic = ds.index.get_level_values("carrier") + duplicated = generic.duplicated(keep=False) + index = specific.where(duplicated, generic) + return ds.set_axis(index) + + +def plot_static_per_carrier(ds, ax, drop_zero=True): + ds = ds[ds != 0] + ds = ds.dropna() + c = tech_colors[ds.index.get_level_values("carrier").map(rename_techs)] + ds = ds.pipe(rename_index) + ds.T.plot.barh(color=c.values, ax=ax) + ax.grid(axis="x") + + +def read_csv(input): + try: + df = pd.read_csv(input) + df = df.set_index(["component", "carrier"]).squeeze() + except Exception as e: + print(f"An error occurred reading file {input}: {e}") + df = pd.DataFrame() + return df + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_statistics_single", + simpl="", + ll="v1.5", + clusters="5", + opts="", + sector_opts="24h-T-H-B-I-A-dist1", + planning_horizons="2040", + country="all", + carrier="heat", + ) + configure_logging(snakemake) + + tech_colors = pd.Series(snakemake.params.plotting["tech_colors"]) + + for output in snakemake.output.keys(): + if "touch" in output: + with open(snakemake.output.barplots_touch, "a"): + pass + continue + fig, ax = plt.subplots() + if 
output != "energy_balance": + ds = read_csv(snakemake.input[output]) + else: + supply = read_csv(snakemake.input["supply"]) + withdrawal = read_csv(snakemake.input["withdrawal"]) + ds = pd.concat([supply, withdrawal.mul(-1)]) + if ds.empty: + fig.savefig(snakemake.output[output]) + continue + plot_static_per_carrier(ds, ax) + fig.savefig(snakemake.output[output], bbox_inches="tight") From b67b025913b5ba05fc8a0dd564dbf9bb5b417300 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 9 Feb 2024 18:36:16 +0100 Subject: [PATCH 26/42] bugfix: coal emissions for industry weren't tracked Also allow industrial coal demand to be regional (so we can include them in regional CO2 constraints). --- config/config.default.yaml | 1 + scripts/prepare_sector_network.py | 39 +++++++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index b24f2cebb..dcdf371d4 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -501,6 +501,7 @@ sector: SMR_cc: true regional_methanol_demand: false regional_oil_demand: false + regional_coal_demand: false regional_co2_sequestration_potential: enable: false attribute: 'conservative estimate Mt' diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index c080126c2..62671a180 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -175,6 +175,13 @@ def define_spatial(nodes, options): spatial.coal.nodes = ["EU coal"] spatial.coal.locations = ["EU"] + if options["regional_coal_demand"]: + spatial.coal.demand_locations = nodes + spatial.coal.industry = nodes + " coal for industry" + else: + spatial.coal.demand_locations = ["EU"] + spatial.coal.industry = ["EU coal for industry"] + # lignite spatial.lignite = SimpleNamespace() spatial.lignite.nodes = ["EU lignite"] @@ -3060,19 +3067,41 @@ def add_industry(n, costs): mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance p_set = ( - 
industrial_demand["coal"].sum() - + mwh_coal_per_mwh_coke * industrial_demand["coke"].sum() + industrial_demand["coal"] + + mwh_coal_per_mwh_coke * industrial_demand["coke"] ) / nhours + if not options["regional_coal_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.coal.industry, + location=spatial.coal.demand_locations, + carrier="coal for industry", + unit="MWh_LHV", + ) + n.madd( "Load", - spatial.coal.nodes, - suffix=" for industry", - bus=spatial.coal.nodes, + spatial.coal.industry, + bus=spatial.coal.industry, carrier="coal for industry", p_set=p_set, ) + n.madd( + "Link", + spatial.coal.industry, + bus0=spatial.coal.nodes, + bus1=spatial.coal.industry, + bus2="co2 atmosphere", + carrier="coal for industry", + p_nom_extendable=True, + efficiency2=costs.at["coal", "CO2 intensity"], + ) + + def add_waste_heat(n): # TODO options? From 6dbb3959080ece1687278c2f538d600cc7df628d Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 9 Feb 2024 19:03:39 +0100 Subject: [PATCH 27/42] bugfix: make sure coal demand is there with regional demand --- scripts/prepare_sector_network.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 62671a180..53372e562 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -3071,6 +3071,9 @@ def add_industry(n, costs): + mwh_coal_per_mwh_coke * industrial_demand["coke"] ) / nhours + p_set.rename(lambda x: x + " coal for industry", + inplace=True) + if not options["regional_coal_demand"]: p_set = p_set.sum() From ba11c2bcceb84518f96fa07c9f31ed6a5cfdf4d1 Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Mon, 12 Feb 2024 09:42:39 +0100 Subject: [PATCH 28/42] add release note and documentation --- doc/configtables/plotting.csv | 9 ++++++--- doc/release_notes.rst | 1 + scripts/plot_statistics_comparison.py | 1 + scripts/plot_statistics_single.py | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git 
a/doc/configtables/plotting.csv b/doc/configtables/plotting.csv index 82fc203c0..3124ce306 100644 --- a/doc/configtables/plotting.csv +++ b/doc/configtables/plotting.csv @@ -1,13 +1,16 @@ ,Unit,Values,Description map,,, -- boundaries,°,"[x1,x2,y1,y2]",Boundaries of the map plots in degrees latitude (y) and longitude (x) -projection,,,, --- name,--,"Valid Cartopy projection name","See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for list of available projections." +projection,,, +-- name,--,Valid Cartopy projection name,See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for list of available projections. -- args,--,--,"Other entries under 'projection' are passed as keyword arguments to the projection constructor, e.g. ``central_longitude: 10.``." costs_max,bn Euro,float,Upper y-axis limit in cost bar plots. costs_threshold,bn Euro,float,Threshold below which technologies will not be shown in cost bar plots. energy_max,TWh,float,Upper y-axis limit in energy bar plots. energy_min,TWh,float,Lower y-axis limit in energy bar plots. energy_threshold,TWh,float,Threshold below which technologies will not be shown in energy bar plots. -tech_colors,--,carrier -> HEX colour code,Mapping from network ``carrier`` to a colour (`HEX colour code `_). +countries,--,[str],List of countries you want to plot in your statistics figures. Default is all countries but also single countries can be defined like ``DE``. +carriers,--,[str],Carriers you want to plot in your statistics figures. This carrier must be an available bus carrier in your network or substring of a carrier or a defined carrier group. +carrier_groups,--,str -> carrier,Mapping from ``carrier_groups`` to individual bus carriers in the network. nice_names,--,str -> str,Mapping from network ``carrier`` to a more readable name. +tech_colors,--,carrier -> HEX colour code,Mapping from network ``carrier`` to a colour (`HEX colour code `_). 
diff --git a/doc/release_notes.rst b/doc/release_notes.rst index ee7bd64b5..2a3d731c5 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -10,6 +10,7 @@ Release Notes Upcoming Release ================ +* Add plotting routine with statistics where we allow for plotting of individual energy carriers and countries. Besides the plots, we create all necessary csv files for the plotting routine. * Add new default to overdimension heating in individual buildings. This allows them to cover heat demand peaks e.g. 10% higher than those in the data. The disadvantage of manipulating the costs is that the capacity is then not quite diff --git a/scripts/plot_statistics_comparison.py b/scripts/plot_statistics_comparison.py index 73776dd51..63287c100 100644 --- a/scripts/plot_statistics_comparison.py +++ b/scripts/plot_statistics_comparison.py @@ -54,6 +54,7 @@ def plot_static_comparison(df, ax, stacked=False): def read_csv(input, output): try: + # filter required csv to plot the wanted output files = list(filter(lambda x: output in x, input)) pattern = r"elec_.*?(\d{4})" network_labels = [re.search(pattern, f).group() for f in files] diff --git a/scripts/plot_statistics_single.py b/scripts/plot_statistics_single.py index dfb709509..780fa62cb 100644 --- a/scripts/plot_statistics_single.py +++ b/scripts/plot_statistics_single.py @@ -22,7 +22,7 @@ def rename_index(ds): return ds.set_axis(index) -def plot_static_per_carrier(ds, ax, drop_zero=True): +def plot_static_per_carrier(ds, ax): ds = ds[ds != 0] ds = ds.dropna() c = tech_colors[ds.index.get_level_values("carrier").map(rename_techs)] From 5b2cbcd9abad3e32129d39f5d1b50895b200104f Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Mon, 12 Feb 2024 09:46:03 +0100 Subject: [PATCH 29/42] change statistics script name --- rules/postprocess.smk | 8 ++++---- scripts/{make_statistics_csv.py => write_statistics.py} | 0 2 files changed, 4 insertions(+), 4 deletions(-) rename scripts/{make_statistics_csv.py => 
write_statistics.py} (100%) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 9d19d4632..c7263a52d 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -45,7 +45,7 @@ if config["foresight"] != "perfect": ( LOGS + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" - ) + ), benchmark: ( BENCHMARKS @@ -74,7 +74,7 @@ if config["foresight"] != "perfect": ( LOGS + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" - ) + ), benchmark: ( BENCHMARKS @@ -102,7 +102,7 @@ if config["foresight"] != "perfect": ( LOGS + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" - ) + ), benchmark: ( BENCHMARKS @@ -311,7 +311,7 @@ rule save_statistics_csv: csv_touch=RESULTS + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/.statistics_{carrier}_csv", script: - "../scripts/make_statistics_csv.py" + "../scripts/write_statistics.py" rule plot_statistics_single: diff --git a/scripts/make_statistics_csv.py b/scripts/write_statistics.py similarity index 100% rename from scripts/make_statistics_csv.py rename to scripts/write_statistics.py From 4702f6961db2f6ed6c2d49c576f83307d8773d69 Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Mon, 12 Feb 2024 12:08:53 +0100 Subject: [PATCH 30/42] simplify statistics plotting --- rules/postprocess.smk | 23 ++++++----- scripts/plot_statistics_comparison.py | 29 +++++++++----- scripts/plot_statistics_single.py | 10 +---- scripts/write_statistics.py | 58 ++++++++++++++++++--------- 4 files changed, 71 insertions(+), 49 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index c7263a52d..1aca7c49a 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -278,10 +278,9 @@ STATISTICS_BARPLOTS = [ "supply", "withdrawal", "market_value", - "energy_balance", ] -STATISTICS_CSV = [ +STATISTICS = [ "capacity_factor", 
"installed_capacity", "optimal_capacity", @@ -291,12 +290,14 @@ STATISTICS_CSV = [ "supply", "withdrawal", "market_value", + "energy_balance", + "total_cost", ] rule save_statistics_csv: params: - barplots=STATISTICS_CSV, + barplots=STATISTICS, input: network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -306,7 +307,7 @@ rule save_statistics_csv: + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_" + f"{csv}.csv" for carrier in config["plotting"].get("carriers", "all") - for csv in STATISTICS_CSV + for csv in STATISTICS }, csv_touch=RESULTS + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/.statistics_{carrier}_csv", @@ -317,14 +318,14 @@ rule save_statistics_csv: rule plot_statistics_single: params: plotting=config["plotting"], - barplots=STATISTICS_BARPLOTS, + barplots=STATISTICS, input: **{ f"{csv}": RESULTS + "statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_" + f"{csv}.csv" for carrier in config["plotting"].get("carriers", "all") - for csv in STATISTICS_CSV + for csv in STATISTICS }, output: **{ @@ -332,7 +333,7 @@ rule plot_statistics_single: + "statistics/figures/single/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_" + f"{plot}.pdf" for carrier in config["plotting"].get("carriers", "all") - for plot in STATISTICS_BARPLOTS + for plot in STATISTICS }, barplots_touch=RESULTS + "statistics/figures/single/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/.statistics_{carrier}_plots", @@ -343,13 +344,13 @@ rule plot_statistics_single: rule plot_statistics_comparison: params: plotting=config["plotting"], - barplots=STATISTICS_BARPLOTS, + barplots=STATISTICS, input: expand( RESULTS + 
"statistics/csv/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}/country_{country}/{carrier}_{csv}.csv", **config["scenario"], - csv=STATISTICS_CSV, + csv=STATISTICS, allow_missing=True, ), output: @@ -358,7 +359,7 @@ rule plot_statistics_comparison: + "statistics/figures/comparison/country_{country}/{carrier}_" + f"{plot}.pdf" for carrier in config["plotting"].get("carriers", "all") - for plot in STATISTICS_BARPLOTS + for plot in STATISTICS }, barplots_touch=RESULTS + "statistics/figures/comparison/country_{country}/.statistics_{carrier}_plots", @@ -376,7 +377,7 @@ rule plot_elec_statistics: **{ f"{plot}_bar": RESULTS + f"figures/statistics_{plot}_bar_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf" - for plot in STATISTICS_BARPLOTS + for plot in STATISTICS }, barplots_touch=RESULTS + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", diff --git a/scripts/plot_statistics_comparison.py b/scripts/plot_statistics_comparison.py index 63287c100..79ea41721 100644 --- a/scripts/plot_statistics_comparison.py +++ b/scripts/plot_statistics_comparison.py @@ -59,7 +59,10 @@ def read_csv(input, output): pattern = r"elec_.*?(\d{4})" network_labels = [re.search(pattern, f).group() for f in files] df = pd.concat( - [pd.read_csv(f).set_index(["component", "carrier"]) for f in files], + [ + pd.read_csv(f, skiprows=2).set_index(["component", "carrier"]) + for f in files + ], axis=1, keys=network_labels, ) @@ -91,16 +94,20 @@ def read_csv(input, output): pass continue fig, ax = plt.subplots() - if "energy_balance" not in output: - df = read_csv(snakemake.input, output) - else: - supply = read_csv(snakemake.input, "supply") - withdrawal = read_csv(snakemake.input, "withdrawal") - df = ( - pd.concat([supply, withdrawal.mul(-1)]) - .groupby(["component", "carrier"]) - .sum() - ) + # if output == "energy_balance": + # supply = read_csv(snakemake.input, "supply") + # withdrawal = read_csv(snakemake.input, "withdrawal") + # df = ( + # 
pd.concat([supply, withdrawal.mul(-1)]) + # .groupby(["component", "carrier"]) + # .sum() + # ) + # elif output == "total_cost": + # opex = read_csv(snakemake.input, "opex") + # capex = read_csv(snakemake.input, "capex") + # df = opex.add(capex, fill_value=0) + # else: + df = read_csv(snakemake.input, output) if df.empty: fig.savefig(snakemake.output[output]) continue diff --git a/scripts/plot_statistics_single.py b/scripts/plot_statistics_single.py index 780fa62cb..47017b801 100644 --- a/scripts/plot_statistics_single.py +++ b/scripts/plot_statistics_single.py @@ -23,7 +23,6 @@ def rename_index(ds): def plot_static_per_carrier(ds, ax): - ds = ds[ds != 0] ds = ds.dropna() c = tech_colors[ds.index.get_level_values("carrier").map(rename_techs)] ds = ds.pipe(rename_index) @@ -33,7 +32,7 @@ def plot_static_per_carrier(ds, ax): def read_csv(input): try: - df = pd.read_csv(input) + df = pd.read_csv(input, skiprows=2) df = df.set_index(["component", "carrier"]).squeeze() except Exception as e: print(f"An error occurred reading file {input}: {e}") @@ -66,12 +65,7 @@ def read_csv(input): pass continue fig, ax = plt.subplots() - if output != "energy_balance": - ds = read_csv(snakemake.input[output]) - else: - supply = read_csv(snakemake.input["supply"]) - withdrawal = read_csv(snakemake.input["withdrawal"]) - ds = pd.concat([supply, withdrawal.mul(-1)]) + ds = read_csv(snakemake.input[output]) if ds.empty: fig.savefig(snakemake.output[output]) continue diff --git a/scripts/write_statistics.py b/scripts/write_statistics.py index 7dfa84044..b49d607c4 100644 --- a/scripts/write_statistics.py +++ b/scripts/write_statistics.py @@ -21,6 +21,16 @@ def groupby_country_and_carrier(n, c, nice_names=False): return [country, carrier] +def call_with_handle(func, **kwargs): + try: + ds = func(**kwargs) + except Exception as e: + print(f"An error occurred: {e}") + ds = pd.Series() + pass + return ds + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import 
mock_snakemake @@ -74,25 +84,35 @@ def groupby_country_and_carrier(n, c, nice_names=False): for output in snakemake.output.keys(): if "touch" in output: continue - if output != "energy_balance": + if output == "energy_balance": + supply = call_with_handle(n.statistics.supply, **kwargs) + withdrawal = call_with_handle(n.statistics.withdrawal, **kwargs) + ds = ( + pd.concat([supply, withdrawal.mul(-1)]) + .groupby(["component", "carrier"]) + .sum() + ) + ds.attrs = supply.attrs + ds.attrs["name"] = "Energy Balance" + elif output == "total_cost": + opex = call_with_handle(n.statistics.opex, **kwargs) + capex = call_with_handle(n.statistics.capex, **kwargs) + ds = opex.add(capex, fill_value=0) + ds.attrs["name"] = "Total Cost" + else: func = eval(f"n.statistics.{output}") - try: - ds = func(**kwargs) - if ds.empty: - print( - f"Empty series for {output} with bus carrier {bus_carrier} and country {country}." - ) - pd.Series().to_csv(snakemake.output[output]) - continue - except Exception as e: - print(f"An error occurred: {e}") - pd.Series().to_csv(snakemake.output[output]) - pass - continue - if country and country != "all": - ds = ds.xs(country, level="country") - ds.name = ds.attrs["name"] - ds.dropna().to_csv(snakemake.output[output]) - # touch file + ds = call_with_handle(func, **kwargs) + + if ds.empty: + print( + f"Empty series for {output} with bus carrier {bus_carrier} and country {country}." 
+ ) + pd.Series().to_csv(snakemake.output[output]) + continue + if country and country != "all": + ds = ds.xs(country, level="country") + pd.Series(ds.attrs).to_csv(snakemake.output[output], header=False) + ds.dropna().to_csv(snakemake.output[output], mode="a") + # touch file with open(snakemake.output.csv_touch, "a"): pass From 0e734fd0072cb425a95cf1a378ad6cb2b84c49d4 Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Mon, 12 Feb 2024 13:53:13 +0100 Subject: [PATCH 31/42] improve and clean-up statistic plots --- rules/postprocess.smk | 34 +++++++++++++-------------- scripts/plot_statistics_comparison.py | 21 +++++------------ scripts/plot_statistics_single.py | 10 ++++---- scripts/write_statistics.py | 2 +- 4 files changed, 30 insertions(+), 37 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 1aca7c49a..f56f79ec6 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -280,24 +280,24 @@ STATISTICS_BARPLOTS = [ "market_value", ] -STATISTICS = [ - "capacity_factor", - "installed_capacity", - "optimal_capacity", - "capex", - "opex", - "curtailment", - "supply", - "withdrawal", - "market_value", - "energy_balance", - "total_cost", -] +STATISTICS = { + "capacity_factor": ("-", "p.u."), + "installed_capacity": (1e3, "GW"), + "optimal_capacity": (1e3, "GW"), + "capex": (1e9, "bn €"), + "opex": (1e9, "bn €"), + "total_cost": ("1e9", "bn €"), + "curtailment": (1e3, "GWh"), + "supply": (1e6, "TWh"), + "withdrawal": (1e6, "TWh"), + "energy_balance": (1e6, "TWh"), + "market_value": ("-", "€/MWh"), +} rule save_statistics_csv: params: - barplots=STATISTICS, + statistics=STATISTICS, input: network=RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -318,7 +318,7 @@ rule save_statistics_csv: rule plot_statistics_single: params: plotting=config["plotting"], - barplots=STATISTICS, + statistics=STATISTICS, input: **{ f"{csv}": RESULTS @@ -344,7 +344,7 @@ rule plot_statistics_single: rule 
plot_statistics_comparison: params: plotting=config["plotting"], - barplots=STATISTICS, + statistics=STATISTICS, input: expand( RESULTS @@ -377,7 +377,7 @@ rule plot_elec_statistics: **{ f"{plot}_bar": RESULTS + f"figures/statistics_{plot}_bar_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf" - for plot in STATISTICS + for plot in STATISTICS_BARPLOTS }, barplots_touch=RESULTS + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", diff --git a/scripts/plot_statistics_comparison.py b/scripts/plot_statistics_comparison.py index 79ea41721..ce20340b7 100644 --- a/scripts/plot_statistics_comparison.py +++ b/scripts/plot_statistics_comparison.py @@ -7,7 +7,6 @@ import re import matplotlib.pyplot as plt -import matplotlib.ticker as ticker import pandas as pd import seaborn as sns from _helpers import configure_logging @@ -40,11 +39,15 @@ def rename_index(df): def plot_static_comparison(df, ax, stacked=False): + factor, unit = conversion[output] df = df[df != 0] df = df.dropna(axis=0, how="all").fillna(0) + if df.empty: + return c = tech_colors[df.index.get_level_values("carrier").map(rename_techs)] df = df.pipe(rename_index).T - df.plot.bar(color=c.values, ax=ax, stacked=stacked, legend=False) + df = df.div(float(factor)) if factor != "-" else df + df.plot.bar(color=c.values, ax=ax, stacked=stacked, legend=False, ylabel=unit) ax.legend( bbox_to_anchor=(1, 1), loc="upper left", @@ -87,6 +90,7 @@ def read_csv(input, output): configure_logging(snakemake) tech_colors = pd.Series(snakemake.params.plotting["tech_colors"]) + conversion = pd.Series(snakemake.params.statistics) for output in snakemake.output.keys(): if "touch" in output: @@ -94,19 +98,6 @@ def read_csv(input, output): pass continue fig, ax = plt.subplots() - # if output == "energy_balance": - # supply = read_csv(snakemake.input, "supply") - # withdrawal = read_csv(snakemake.input, "withdrawal") - # df = ( - # pd.concat([supply, withdrawal.mul(-1)]) - # .groupby(["component", "carrier"]) - # 
.sum() - # ) - # elif output == "total_cost": - # opex = read_csv(snakemake.input, "opex") - # capex = read_csv(snakemake.input, "capex") - # df = opex.add(capex, fill_value=0) - # else: df = read_csv(snakemake.input, output) if df.empty: fig.savefig(snakemake.output[output]) diff --git a/scripts/plot_statistics_single.py b/scripts/plot_statistics_single.py index 47017b801..22631f096 100644 --- a/scripts/plot_statistics_single.py +++ b/scripts/plot_statistics_single.py @@ -9,7 +9,6 @@ import seaborn as sns from _helpers import configure_logging from plot_summary import rename_techs -from pypsa.statistics import get_carrier sns.set_theme("paper", style="whitegrid") @@ -22,11 +21,13 @@ def rename_index(ds): return ds.set_axis(index) -def plot_static_per_carrier(ds, ax): +def plot_static_single(ds, ax): + factor, unit = conversion[output] ds = ds.dropna() c = tech_colors[ds.index.get_level_values("carrier").map(rename_techs)] ds = ds.pipe(rename_index) - ds.T.plot.barh(color=c.values, ax=ax) + ds = ds.div(float(factor)) if factor != "-" else ds + ds.T.plot.barh(color=c.values, ax=ax, ylabel=unit) ax.grid(axis="x") @@ -58,6 +59,7 @@ def read_csv(input): configure_logging(snakemake) tech_colors = pd.Series(snakemake.params.plotting["tech_colors"]) + conversion = pd.Series(snakemake.params.statistics) for output in snakemake.output.keys(): if "touch" in output: @@ -69,5 +71,5 @@ def read_csv(input): if ds.empty: fig.savefig(snakemake.output[output]) continue - plot_static_per_carrier(ds, ax) + plot_static_single(ds, ax) fig.savefig(snakemake.output[output], bbox_inches="tight") diff --git a/scripts/write_statistics.py b/scripts/write_statistics.py index b49d607c4..98d9a5c3a 100644 --- a/scripts/write_statistics.py +++ b/scripts/write_statistics.py @@ -89,7 +89,7 @@ def call_with_handle(func, **kwargs): withdrawal = call_with_handle(n.statistics.withdrawal, **kwargs) ds = ( pd.concat([supply, withdrawal.mul(-1)]) - .groupby(["component", "carrier"]) + 
.groupby(supply.index.names) .sum() ) ds.attrs = supply.attrs From b3ab9bbd84ac203516af290096d191fcafed3c52 Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Mon, 12 Feb 2024 14:44:39 +0100 Subject: [PATCH 32/42] revert changes in STATISTICS_BARPLOTS --- rules/postprocess.smk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index f56f79ec6..cf49209a3 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -272,8 +272,8 @@ STATISTICS_BARPLOTS = [ "capacity_factor", "installed_capacity", "optimal_capacity", - "capex", - "opex", + "capital_expenditure", + "operational_expenditure", "curtailment", "supply", "withdrawal", From b3006813f2914bcbd37de120535eecfcda9cb1f9 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 12 Feb 2024 18:17:53 +0100 Subject: [PATCH 33/42] bugfix: include all countries in ammonia production resource This is so that the full EU28 ammonia demand can be correctly subtracted in the build_industry_sector_ratios.py script. No other downstream scripts are affected by this change. 
--- scripts/build_ammonia_production.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/build_ammonia_production.py b/scripts/build_ammonia_production.py index 1bcdf9ae5..bfcfaf029 100644 --- a/scripts/build_ammonia_production.py +++ b/scripts/build_ammonia_production.py @@ -8,6 +8,7 @@ import country_converter as coco import pandas as pd +import numpy as np cc = coco.CountryConverter() @@ -30,8 +31,12 @@ ammonia.index = cc.convert(ammonia.index, to="iso2") years = [str(i) for i in range(2013, 2018)] - countries = ammonia.index.intersection(snakemake.params.countries) - ammonia = ammonia.loc[countries, years].astype(float) + + ammonia = ammonia[years] + ammonia.replace("--", + np.nan, + inplace=True) + ammonia = ammonia.astype(float) # convert from ktonN to ktonNH3 ammonia *= 17 / 14 From 882d52ffb82ebaccf8408fe3a5513482bc6af23d Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 12 Feb 2024 18:19:25 +0100 Subject: [PATCH 34/42] bugfix: correct units of subtracted chlorine and methanol in build_industry_sector_ratios.py. In the config the units are Mt/a, they are multiplied by MWh/t, but what is desired is GWh/a. 
--- scripts/build_industry_sector_ratios.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/build_industry_sector_ratios.py b/scripts/build_industry_sector_ratios.py index 457050026..805cf3e71 100644 --- a/scripts/build_industry_sector_ratios.py +++ b/scripts/build_industry_sector_ratios.py @@ -408,15 +408,15 @@ def chemicals_industry(): df.loc["methane", sector] -= ammonia_total * params["MWh_CH4_per_tNH3_SMR"] df.loc["elec", sector] -= ammonia_total * params["MWh_elec_per_tNH3_SMR"] - # subtract chlorine demand + # subtract chlorine demand (in MtCl/a) chlorine_total = params["chlorine_production_today"] - df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"] - df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"] + df.loc["hydrogen", sector] -= chlorine_total * params["MWh_H2_per_tCl"] * 1e3 + df.loc["elec", sector] -= chlorine_total * params["MWh_elec_per_tCl"] * 1e3 - # subtract methanol demand + # subtract methanol demand (in MtMeOH/a) methanol_total = params["methanol_production_today"] - df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"] - df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"] + df.loc["methane", sector] -= methanol_total * params["MWh_CH4_per_tMeOH"] * 1e3 + df.loc["elec", sector] -= methanol_total * params["MWh_elec_per_tMeOH"] * 1e3 # MWh/t material df.loc[sources, sector] = df.loc[sources, sector] / s_out From cdbe0f522480f8242e6a1e5de9d8e9a6fd267081 Mon Sep 17 00:00:00 2001 From: Michael Lindner Date: Tue, 13 Feb 2024 18:47:57 +0100 Subject: [PATCH 35/42] cluster nice names as well --- scripts/prepare_sector_network.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 53372e562..227b10a68 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -3422,7 +3422,11 @@ def define_clustering(attributes, 
aggregate_dict): for c in n.iterate_components(components): df = c.df - cols = df.columns[df.columns.str.contains("bus") | (df.columns == "carrier")] + cols = df.columns[ + df.columns.str.contains("bus") + | (df.columns == "carrier") + | (df.columns == "nice_name") + ] # rename columns and index df[cols] = df[cols].apply( From 0baeaaae5741a01efd1f6da0e4a3e9e7ec00bd7e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 18:00:04 +0000 Subject: [PATCH 36/42] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- rules/postprocess.smk | 6 +++--- scripts/build_ammonia_production.py | 6 ++---- scripts/prepare_sector_network.py | 6 ++---- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 18d65c163..98b90fd14 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -45,7 +45,7 @@ if config["foresight"] != "perfect": ( LOGS + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" - ) + ), benchmark: ( BENCHMARKS @@ -74,7 +74,7 @@ if config["foresight"] != "perfect": ( LOGS + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" - ) + ), benchmark: ( BENCHMARKS @@ -102,7 +102,7 @@ if config["foresight"] != "perfect": ( LOGS + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" - ) + ), benchmark: ( BENCHMARKS diff --git a/scripts/build_ammonia_production.py b/scripts/build_ammonia_production.py index bfcfaf029..77b330750 100644 --- a/scripts/build_ammonia_production.py +++ b/scripts/build_ammonia_production.py @@ -7,8 +7,8 @@ """ import country_converter as coco -import pandas as pd import numpy as np +import pandas as pd cc = coco.CountryConverter() @@ -33,9 +33,7 @@ years = [str(i) for i in range(2013, 2018)] ammonia = ammonia[years] - 
ammonia.replace("--", - np.nan, - inplace=True) + ammonia.replace("--", np.nan, inplace=True) ammonia = ammonia.astype(float) # convert from ktonN to ktonNH3 diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 227b10a68..1a1222a37 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -3071,8 +3071,7 @@ def add_industry(n, costs): + mwh_coal_per_mwh_coke * industrial_demand["coke"] ) / nhours - p_set.rename(lambda x: x + " coal for industry", - inplace=True) + p_set.rename(lambda x: x + " coal for industry", inplace=True) if not options["regional_coal_demand"]: p_set = p_set.sum() @@ -3105,7 +3104,6 @@ def add_industry(n, costs): ) - def add_waste_heat(n): # TODO options? @@ -3423,7 +3421,7 @@ def define_clustering(attributes, aggregate_dict): for c in n.iterate_components(components): df = c.df cols = df.columns[ - df.columns.str.contains("bus") + df.columns.str.contains("bus") | (df.columns == "carrier") | (df.columns == "nice_name") ] From 13cff1e21a21c8ce7560f9bc41dfe15b74fe27d5 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Wed, 14 Feb 2024 10:09:13 +0100 Subject: [PATCH 37/42] for today's industry energy demand, separate MeOH, Cl and HVC I.e. split basic chemicals (without ammonia) into MeOH, Cl and HVC. This now agrees with scheme for industrial sectors tomorrow. --- config/config.default.yaml | 1 + ...ustrial_energy_demand_per_country_today.py | 46 +++++++++---------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index dcdf371d4..c7f0c0947 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -637,6 +637,7 @@ industry: 2040: 0.12 2045: 0.16 2050: 0.20 + basic_chemicals_without_NH3_energy_demand_today: 1138. #TWh/a HVC_production_today: 52. 
MWh_elec_per_tHVC_mechanical_recycling: 0.547 MWh_elec_per_tHVC_chemical_recycling: 6.9 diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index d1c672f19..696921dee 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -73,7 +73,7 @@ def industrial_energy_demand_per_country(country, year, jrc_dir): def get_subsector_data(sheet): df = df_dict[sheet][year].groupby(fuels).sum() - df["ammonia"] = 0.0 + df["hydrogen"] = 0.0 df["other"] = df["all"] - df.loc[df.index != "all"].sum() @@ -94,35 +94,40 @@ def get_subsector_data(sheet): return df -def add_ammonia_energy_demand(demand): +def separate_basic_chemicals(demand): # MtNH3/a fn = snakemake.input.ammonia_production ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3 - def get_ammonia_by_fuel(x): - fuels = { - "gas": params["MWh_CH4_per_tNH3_SMR"], - "electricity": params["MWh_elec_per_tNH3_SMR"], - } + ammonia = pd.DataFrame({"gas": ammonia * params["MWh_CH4_per_tNH3_SMR"], + "electricity" : ammonia * params["MWh_elec_per_tNH3_SMR"]}).T - return pd.Series({k: x * v for k, v in fuels.items()}) + demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) - ammonia_by_fuel = ammonia.apply(get_ammonia_by_fuel).T - ammonia_by_fuel = ammonia_by_fuel.unstack().reindex( - index=demand.index, fill_value=0.0 + demand["Basic chemicals (without ammonia)"] = ( + demand["Basic chemicals"] - demand["Ammonia"] ) - ammonia = pd.DataFrame({"ammonia": ammonia * params["MWh_NH3_per_tNH3"]}).T + demand.drop(columns="Basic chemicals", inplace=True) - demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) + distribution = demand["Basic chemicals (without ammonia)"].groupby(level=0).sum()/params["basic_chemicals_without_NH3_energy_demand_today"] - demand["Basic chemicals (without ammonia)"] = ( - demand["Basic 
chemicals"] - ammonia_by_fuel + chlorine = pd.DataFrame({"hydrogen": distribution * params["chlorine_production_today"] * params["MWh_H2_per_tCl"], + "electricity" : distribution * params["chlorine_production_today"] * params["MWh_elec_per_tCl"]}).T + + methanol = pd.DataFrame({"gas": distribution * params["methanol_production_today"] * params["MWh_CH4_per_tMeOH"], + "electricity" : distribution * params["methanol_production_today"] * params["MWh_elec_per_tMeOH"]}).T + + demand["Chlorine"] = chlorine.unstack().reindex(index=demand.index, fill_value=0.0) + demand["Methanol"] = methanol.unstack().reindex(index=demand.index, fill_value=0.0) + + demand["HVC"] = ( + demand["Basic chemicals (without ammonia)"] -demand["Methanol"] - demand["Chlorine"] ) - demand["Basic chemicals (without ammonia)"].clip(lower=0, inplace=True) + demand.drop(columns="Basic chemicals (without ammonia)", inplace=True) - demand.drop(columns="Basic chemicals", inplace=True) + demand["HVC"].clip(lower=0, inplace=True) return demand @@ -135,11 +140,6 @@ def add_non_eu28_industrial_energy_demand(countries, demand): fn = snakemake.input.industrial_production_per_country production = pd.read_csv(fn, index_col=0) / 1e3 - # recombine HVC, Chlorine and Methanol to Basic chemicals (without ammonia) - chemicals = ["HVC", "Chlorine", "Methanol"] - production["Basic chemicals (without ammonia)"] = production[chemicals].sum(axis=1) - production.drop(columns=chemicals, inplace=True) - eu28_production = production.loc[countries.intersection(eu28)].sum() eu28_energy = demand.groupby(level=1).sum() eu28_averages = eu28_energy / eu28_production @@ -182,7 +182,7 @@ def industrial_energy_demand(countries, year): demand = industrial_energy_demand(countries.intersection(eu28), year) - demand = add_ammonia_energy_demand(demand) + demand = separate_basic_chemicals(demand) demand = add_non_eu28_industrial_energy_demand(countries, demand) From 13412fd85f78e8230e81078409c2b8f541d67756 Mon Sep 17 00:00:00 2001 From: Tom 
Brown Date: Wed, 14 Feb 2024 10:33:50 +0100 Subject: [PATCH 38/42] industrial prod: use EU28 total for denominator for distribution key This makes sure the distribution key is correct when only subsets of countries are used. This is then consistent with the HVC, MeOH and Cl totals being EU28 totals. Without this change, industry production is overestimated when using subsets of countries. Or the user has to adjust the totals for industrial production themselves. --- config/config.default.yaml | 1 + scripts/build_industrial_production_per_country.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index c7f0c0947..2dad2dffc 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -638,6 +638,7 @@ industry: 2045: 0.16 2050: 0.20 basic_chemicals_without_NH3_energy_demand_today: 1138. #TWh/a + basic_chemicals_without_NH3_production_today: 69. #Mt/a HVC_production_today: 52. MWh_elec_per_tHVC_mechanical_recycling: 0.547 MWh_elec_per_tHVC_chemical_recycling: 6.9 diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index 0aea4f150..afbe4de72 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -261,7 +261,7 @@ def separate_basic_chemicals(demand, year): demand["Basic chemicals"].clip(lower=0.0, inplace=True) # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals - distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum() + distribution_key = demand["Basic chemicals"] / params["basic_chemicals_without_NH3_production_today"] / 1e3 demand["HVC"] = params["HVC_production_today"] * 1e3 * distribution_key demand["Chlorine"] = params["chlorine_production_today"] * 1e3 * distribution_key demand["Methanol"] = params["methanol_production_today"] * 1e3 * distribution_key From 
958dacb0a6416beb11c9bf635bf42e10d43275e5 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Wed, 14 Feb 2024 12:24:58 +0100 Subject: [PATCH 39/42] use production to determine today's energy demand for basic chemicals This uniformises how demand for basic chemicals is calculated. We also avoid unnecessary use of ammonia production separately. --- config/config.default.yaml | 3 +- rules/build_sector.smk | 1 - ...ustrial_energy_demand_per_country_today.py | 45 +++++++------------ 3 files changed, 17 insertions(+), 32 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index 2dad2dffc..3f062c756 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -637,8 +637,7 @@ industry: 2040: 0.12 2045: 0.16 2050: 0.20 - basic_chemicals_without_NH3_energy_demand_today: 1138. #TWh/a - basic_chemicals_without_NH3_production_today: 69. #Mt/a + basic_chemicals_without_NH3_production_today: 69. #Mt/a, = 86 Mtethylene-equiv - 17 MtNH3 HVC_production_today: 52. MWh_elec_per_tHVC_mechanical_recycling: 0.547 MWh_elec_per_tHVC_chemical_recycling: 6.9 diff --git a/rules/build_sector.smk b/rules/build_sector.smk index c25c86739..f50432d63 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -566,7 +566,6 @@ rule build_industrial_energy_demand_per_country_today: industry=config["industry"], input: jrc="data/bundle-sector/jrc-idees-2015", - ammonia_production=RESOURCES + "ammonia_production.csv", industrial_production_per_country=RESOURCES + "industrial_production_per_country.csv", output: diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index 696921dee..65569b555 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -94,51 +94,34 @@ def get_subsector_data(sheet): return df -def separate_basic_chemicals(demand): - # MtNH3/a - fn = 
snakemake.input.ammonia_production - ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3 +def separate_basic_chemicals(demand, production): - ammonia = pd.DataFrame({"gas": ammonia * params["MWh_CH4_per_tNH3_SMR"], - "electricity" : ammonia * params["MWh_elec_per_tNH3_SMR"]}).T + ammonia = pd.DataFrame({"hydrogen": production["Ammonia"] * params["MWh_H2_per_tNH3_electrolysis"], + "electricity" : production["Ammonia"] * params["MWh_elec_per_tNH3_electrolysis"]}).T + chlorine = pd.DataFrame({"hydrogen": production["Chlorine"] * params["MWh_H2_per_tCl"], + "electricity" : production["Chlorine"] * params["MWh_elec_per_tCl"]}).T + methanol = pd.DataFrame({"gas": production["Methanol"] * params["MWh_CH4_per_tMeOH"], + "electricity" : production["Methanol"] * params["MWh_elec_per_tMeOH"]}).T demand["Ammonia"] = ammonia.unstack().reindex(index=demand.index, fill_value=0.0) - - demand["Basic chemicals (without ammonia)"] = ( - demand["Basic chemicals"] - demand["Ammonia"] - ) - - demand.drop(columns="Basic chemicals", inplace=True) - - distribution = demand["Basic chemicals (without ammonia)"].groupby(level=0).sum()/params["basic_chemicals_without_NH3_energy_demand_today"] - - chlorine = pd.DataFrame({"hydrogen": distribution * params["chlorine_production_today"] * params["MWh_H2_per_tCl"], - "electricity" : distribution * params["chlorine_production_today"] * params["MWh_elec_per_tCl"]}).T - - methanol = pd.DataFrame({"gas": distribution * params["methanol_production_today"] * params["MWh_CH4_per_tMeOH"], - "electricity" : distribution * params["methanol_production_today"] * params["MWh_elec_per_tMeOH"]}).T - demand["Chlorine"] = chlorine.unstack().reindex(index=demand.index, fill_value=0.0) demand["Methanol"] = methanol.unstack().reindex(index=demand.index, fill_value=0.0) demand["HVC"] = ( - demand["Basic chemicals (without ammonia)"] -demand["Methanol"] - demand["Chlorine"] + demand["Basic chemicals"] - demand["Ammonia"] - demand["Methanol"] - demand["Chlorine"] ) - 
demand.drop(columns="Basic chemicals (without ammonia)", inplace=True) + demand.drop(columns="Basic chemicals", inplace=True) demand["HVC"].clip(lower=0, inplace=True) return demand -def add_non_eu28_industrial_energy_demand(countries, demand): +def add_non_eu28_industrial_energy_demand(countries, demand, production): non_eu28 = countries.difference(eu28) if non_eu28.empty: return demand - # output in MtMaterial/a - fn = snakemake.input.industrial_production_per_country - production = pd.read_csv(fn, index_col=0) / 1e3 eu28_production = production.loc[countries.intersection(eu28)].sum() eu28_energy = demand.groupby(level=1).sum() @@ -182,9 +165,13 @@ def industrial_energy_demand(countries, year): demand = industrial_energy_demand(countries.intersection(eu28), year) - demand = separate_basic_chemicals(demand) + # output in MtMaterial/a + production = pd.read_csv(snakemake.input.industrial_production_per_country, + index_col=0) / 1e3 + + demand = separate_basic_chemicals(demand, production) - demand = add_non_eu28_industrial_energy_demand(countries, demand) + demand = add_non_eu28_industrial_energy_demand(countries, demand, production) # for format compatibility demand = demand.stack(dropna=False).unstack(level=[0, 2]) From 6a65cf90ce51579b3db7acd776114ff287d8d7c6 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Wed, 14 Feb 2024 18:15:18 +0100 Subject: [PATCH 40/42] new script to interpolate industry sector ratios today to tomorrow For each country we gradually switch industry processes from today's specific energy carrier usage per ton material output to the best-in-class energy consumption of tomorrow in the industry_sector_ratios.csv. This is done on a per-country basis. The ratio of today to tomorrow's energy consumption is set with the config["industry"]["sector_ratios_fraction_future"] parameter. 
--- config/config.default.yaml | 8 ++ rules/build_sector.smk | 26 ++++++- ...build_industrial_energy_demand_per_node.py | 21 +++-- ...ild_industry_sector_ratios_intermediate.py | 77 +++++++++++++++++++ 4 files changed, 123 insertions(+), 9 deletions(-) create mode 100644 scripts/build_industry_sector_ratios_intermediate.py diff --git a/config/config.default.yaml b/config/config.default.yaml index 3f062c756..a33cf1e14 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -637,6 +637,14 @@ industry: 2040: 0.12 2045: 0.16 2050: 0.20 + sector_ratios_fraction_future: + 2020: 0.0 + 2025: 0.1 + 2030: 0.3 + 2035: 0.5 + 2040: 0.7 + 2045: 0.9 + 2050: 1.0 basic_chemicals_without_NH3_production_today: 69. #Mt/a, = 86 Mtethylene-equiv - 17 MtNH3 HVC_production_today: 52. MWh_elec_per_tHVC_mechanical_recycling: 0.547 diff --git a/rules/build_sector.smk b/rules/build_sector.smk index f50432d63..bec9aa7aa 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -433,6 +433,30 @@ rule build_industry_sector_ratios: "../scripts/build_industry_sector_ratios.py" +rule build_industry_sector_ratios_intermediate: + params: + industry=config["industry"], + input: + industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv", + industrial_energy_demand_per_country_today=RESOURCES + + "industrial_energy_demand_per_country_today.csv", + industrial_production_per_country=RESOURCES + + "industrial_production_per_country.csv", + output: + industry_sector_ratios=RESOURCES + "industry_sector_ratios_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_industry_sector_ratios_{planning_horizons}.log", + benchmark: + BENCHMARKS + "build_industry_sector_ratios_{planning_horizons}" + conda: + "../envs/environment.yaml" + script: + "../scripts/build_industry_sector_ratios_intermediate.py" + + rule build_industrial_production_per_country: params: industry=config["industry"], @@ -535,7 +559,7 @@ rule 
build_industrial_production_per_node: rule build_industrial_energy_demand_per_node: input: - industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv", + industry_sector_ratios=RESOURCES + "industry_sector_ratios_{planning_horizons}.csv", industrial_production_per_node=RESOURCES + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv", industrial_energy_demand_per_node_today=RESOURCES diff --git a/scripts/build_industrial_energy_demand_per_node.py b/scripts/build_industrial_energy_demand_per_node.py index 55c10c5d6..84f8679ad 100644 --- a/scripts/build_industrial_energy_demand_per_node.py +++ b/scripts/build_industrial_energy_demand_per_node.py @@ -19,23 +19,28 @@ planning_horizons=2030, ) - # import EU ratios df as csv + # import ratios fn = snakemake.input.industry_sector_ratios - industry_sector_ratios = pd.read_csv(fn, index_col=0) + sector_ratios = pd.read_csv(fn, + header=[0,1], + index_col=0) - # material demand per node and industry (kton/a) + # material demand per node and industry (Mton/a) fn = snakemake.input.industrial_production_per_node - nodal_production = pd.read_csv(fn, index_col=0) + nodal_production = pd.read_csv(fn, index_col=0) / 1e3 # energy demand today to get current electricity fn = snakemake.input.industrial_energy_demand_per_node_today nodal_today = pd.read_csv(fn, index_col=0) - # final energy consumption per node and industry (TWh/a) - nodal_df = nodal_production.dot(industry_sector_ratios.T) + nodal_sector_ratios = pd.concat({node: sector_ratios[node[:2]] for node in nodal_production.index}, + axis=1) + + nodal_production_stacked = nodal_production.stack() + nodal_production_stacked.index.names = [None,None] - # convert GWh to TWh and ktCO2 to MtCO2 - nodal_df *= 0.001 + # final energy consumption per node and industry (TWh/a) + nodal_df = (nodal_sector_ratios.multiply(nodal_production_stacked)).T.groupby(level=0).sum() rename_sectors = { "elec": "electricity", diff --git 
a/scripts/build_industry_sector_ratios_intermediate.py b/scripts/build_industry_sector_ratios_intermediate.py new file mode 100644 index 000000000..86f88218a --- /dev/null +++ b/scripts/build_industry_sector_ratios_intermediate.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build specific energy consumption by carrier and industries and by country, +that interpolates between the current average energy consumption (from 2015-2020) +and the ideal future best-in-class consumption. +""" + +import pandas as pd + +from prepare_sector_network import get + +def build_industry_sector_ratios_intermediate(): + + # in TWh/a + demand = pd.read_csv(snakemake.input.industrial_energy_demand_per_country_today, + header=[0,1], + index_col=0) + + # in Mt/a + production = pd.read_csv(snakemake.input.industrial_production_per_country, + index_col=0) / 1e3 + production = production.unstack().swaplevel() + + # in MWh/t + future_sector_ratios = pd.read_csv(snakemake.input.industry_sector_ratios, + index_col=0) + + production.index.names = [None,None] + + today_sector_ratios = demand.div(production, axis=1) + + today_sector_ratios.drop(columns=today_sector_ratios.columns[today_sector_ratios.isna().all()], + inplace=True) + + rename = pd.Series(today_sector_ratios.index, + today_sector_ratios.index) + rename["waste"] = "biomass" + rename["electricity"] = "elec" + rename["solid"] = "coke" + rename["gas"] = "methane" + rename["other"] = "biomass" + rename["liquid"] = "naphtha" + + today_sector_ratios.rename(rename, + inplace=True) + + + fraction_future = get(params["sector_ratios_fraction_future"], year) + + intermediate_sector_ratios = {} + + for ct in today_sector_ratios.columns.unique(level=0): + + intermediate_sector_ratio = future_sector_ratios.copy() + + intermediate_sector_ratio.loc[today_sector_ratios[ct].index,today_sector_ratios[ct].columns] = 
(fraction_future*intermediate_sector_ratio.loc[today_sector_ratios[ct].index,today_sector_ratios[ct].columns] + + (1 - fraction_future)*today_sector_ratios[ct]) + intermediate_sector_ratios[ct] = intermediate_sector_ratio + + intermediate_sector_ratios = pd.concat(intermediate_sector_ratios, axis=1) + + intermediate_sector_ratios.to_csv(snakemake.output.industry_sector_ratios) + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("build_industry_sector_ratios_intermediate") + + year = int(snakemake.wildcards.planning_horizons[-4:]) + + params = snakemake.params.industry + + build_industry_sector_ratios_intermediate() From e7b5a0b20bebbc54612bd62bd54228c7ed32b87e Mon Sep 17 00:00:00 2001 From: Philipp Glaum Date: Thu, 15 Feb 2024 17:09:29 +0100 Subject: [PATCH 41/42] plot_statistics_single: fix read csv file when file had only single entry, adjust axis label --- scripts/plot_statistics_single.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/plot_statistics_single.py b/scripts/plot_statistics_single.py index 22631f096..97491f58e 100644 --- a/scripts/plot_statistics_single.py +++ b/scripts/plot_statistics_single.py @@ -27,14 +27,14 @@ def plot_static_single(ds, ax): c = tech_colors[ds.index.get_level_values("carrier").map(rename_techs)] ds = ds.pipe(rename_index) ds = ds.div(float(factor)) if factor != "-" else ds - ds.T.plot.barh(color=c.values, ax=ax, ylabel=unit) - ax.grid(axis="x") + ds.T.plot.barh(color=c.values, ax=ax, xlabel=unit) + ax.grid(axis="y") def read_csv(input): try: df = pd.read_csv(input, skiprows=2) - df = df.set_index(["component", "carrier"]).squeeze() + df = df.set_index(["component", "carrier"]).iloc[:, 0] except Exception as e: print(f"An error occurred reading file {input}: {e}") df = pd.DataFrame() From 32abfdd96c4365be648d4d972a305ea68a3b4289 Mon Sep 17 00:00:00 2001 From: toniseibold Date: Wed, 21 Feb 2024 16:39:20 +0100 
Subject: [PATCH 42/42] update energy totals from 2011 to 2019 --- config/config.default.yaml | 4 +- rules/common.smk | 7 +- scripts/build_energy_totals.py | 244 +++++++++++++++++++++++++++++---- 3 files changed, 224 insertions(+), 31 deletions(-) diff --git a/config/config.default.yaml b/config/config.default.yaml index e722d85e9..89b650f19 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -314,9 +314,9 @@ pypsa_eur: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#energy energy: - energy_totals_year: 2011 + energy_totals_year: 2019 base_emissions_year: 1990 - eurostat_report_year: 2016 + eurostat_report_year: 2023 emissions: CO2 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#biomass diff --git a/rules/common.smk b/rules/common.smk index 5aa7ae53a..6f156455e 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -65,8 +65,11 @@ def has_internet_access(url="www.zenodo.org") -> bool: def input_eurostat(w): # 2016 includes BA, 2017 does not - report_year = config["energy"]["eurostat_report_year"] - return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition" + if config["energy"]["eurostat_report_year"] != 2023: + report_year = config["energy"]["eurostat_report_year"] + return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition" + else: + return "data/bundle-sector/eurostat-energy_balances-april_2023_edition" def solved_previous_horizon(wildcards): diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index c67bb49d6..2d7ca6427 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -16,6 +16,7 @@ import pandas as pd from _helpers import mute_print from tqdm import tqdm +import os cc = coco.CountryConverter() logger = logging.getLogger(__name__) @@ -120,36 +121,92 @@ def build_eurostat(input_eurostat, countries, report_year, year): """ Return multi-index for all countries' energy data in TWh/a. 
""" - filenames = { + if report_year != 2023: + filenames = { 2016: f"/{year}-Energy-Balances-June2016edition.xlsx", 2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx", } - with mute_print(): - dfs = pd.read_excel( - input_eurostat + filenames[report_year], - sheet_name=None, - skiprows=1, - index_col=list(range(4)), - ) - - # sorted_index necessary for slicing - lookup = eurostat_codes - labelled_dfs = { - lookup[df.columns[0]]: df - for df in dfs.values() - if lookup[df.columns[0]] in countries - } - df = pd.concat(labelled_dfs, sort=True).sort_index() - - # drop non-numeric and country columns - non_numeric_cols = df.columns[df.dtypes != float] - country_cols = df.columns.intersection(lookup.keys()) - to_drop = non_numeric_cols.union(country_cols) - df.drop(to_drop, axis=1, inplace=True) + with mute_print(): + dfs = pd.read_excel( + input_eurostat + filenames[report_year], + sheet_name=None, + skiprows=1, + index_col=list(range(4)), + ) - # convert ktoe/a to TWh/a - df *= 11.63 / 1e3 + # sorted_index necessary for slicing + lookup = eurostat_codes + labelled_dfs = { + lookup[df.columns[0]]: df + for df in dfs.values() + if lookup[df.columns[0]] in countries + } + df = pd.concat(labelled_dfs, sort=True).sort_index() + # drop non-numeric and country columns + non_numeric_cols = df.columns[df.dtypes != float] + country_cols = df.columns.intersection(lookup.keys()) + to_drop = non_numeric_cols.union(country_cols) + df.drop(to_drop, axis=1, inplace=True) + + # convert ktoe/a to TWh/a + df *= 11.63 / 1e3 + + else: + # read in every country file in countries + eurostat = pd.DataFrame() + countries = [country if country != 'GB' else 'UK' for country in countries] + for country in countries: + filename = f"/{country}-Energy-balance-sheets-April-2023-edition.xlsb" + if os.path.exists(input_eurostat + filename): + df = pd.read_excel( + input_eurostat + filename, + engine='pyxlsb', + sheet_name=str(year), + skiprows=4, + index_col=list(range(4))) + # replace entry 'Z' 
with 0 + df.replace('Z', 0, inplace=True) + # write 'International aviation' to the 2nd level of the multiindex + index_number = (df.index.get_level_values(1) == 'International aviation').argmax() + new_index = ('-', 'International aviation', 'International aviation', 'ktoe') + modified_index = list(df.index) + modified_index[index_number] = new_index + df.index = pd.MultiIndex.from_tuples(modified_index, names=df.index.names) + # drop the annoying subhead line + df.drop(df[df[year] == year].index, inplace=True) + # replace 'Z' with 0 + df = df.replace('Z', 0) + # add country to the multiindex + new_tuple = [(country, *idx) for idx in df.index] + new_mindex = pd.MultiIndex.from_tuples(new_tuple, names=['country', None, 'name', None, 'unit']) + df.index = new_mindex + # make numeric values where possible + df = df.apply(pd.to_numeric, errors='coerce') + # drop non-numeric columns + non_numeric_cols = df.columns[df.dtypes != float] + df.drop(non_numeric_cols, axis=1, inplace=True) + # concatenate the dataframes + eurostat = pd.concat([eurostat, df], axis=0) + + eurostat.drop(["Unnamed: 4", year, "Unnamed: 6"], axis=1, inplace=True) + # Renaming some indices + rename = { + 'Households': 'Residential', + 'Commercial & public services': 'Services', + 'Domestic navigation': 'Domestic Navigation' + } + for name, rename in rename.items(): + eurostat.index = eurostat.index.set_levels( + eurostat.index.levels[3].where(eurostat.index.levels[3] != name, rename), + level=3) + new_index = eurostat.index.set_levels(eurostat.index.levels[2].where(eurostat.index.levels[2] != 'International maritime bunkers', 'Bunkers'), level=2) + eurostat.index = new_index + + eurostat.rename(columns={'Total': 'Total all products'}, inplace=True) + eurostat.index = eurostat.index.set_levels(eurostat.index.levels[0].where(eurostat.index.levels[0] != 'UK', 'GB'), level=0) + + df = eurostat * 11.63 / 1e3 return df @@ -736,6 +793,129 @@ def build_transport_data(countries, population, idees): return 
transport_data +def rescale(idees_countries, energy, eurostat): + ''' + Takes JRC IDEES data from 2015 and rescales it by the ratio of the + eurostat data and the 2015 eurostat data. + missing data: ['passenger car efficiency', 'passenger cars'] + ''' + # read in the eurostat data for 2015 + eurostat_2015 = build_eurostat(input_eurostat, countries, 2023, 2015)[["Total all products", "Electricity"]] + eurostat_year = eurostat[["Total all products", "Electricity"]] + # calculate the ratio of the two data sets + ratio = eurostat_year / eurostat_2015 + ratio = ratio.droplevel([1,4]) + ratio.rename(columns={"Total all products": "total", "Electricity": "ele"}, inplace=True) + + residential_total = [ + "total residential space", + "total residential water", + "total residential cooking", + "total residential", + "derived heat residential", + "thermal uses residential", + ] + residential_ele = [ + "electricity residential space", + "electricity residential water", + "electricity residential cooking", + "electricity residential", + ] + + service_total = [ + "total services space", + "total services water", + "total services cooking", + "total services", + "derived heat services", + "thermal uses services", + ] + service_ele = [ + "electricity services space", + "electricity services water", + "electricity services cooking", + "electricity services", + ] + + agri_total = [ + "total agriculture heat", + "total agriculture machinery", + "total agriculture", + ] + agri_ele = [ + "total agriculture electricity", + ] + + road_total = [ + "total road", + "total passenger cars", + "total other road passenger", + "total light duty road freight", + ] + road_ele = [ + "electricity road", + "electricity passenger cars", + "electricity other road passenger", + "electricity light duty road freight", + ] + + rail_total = [ + "total rail", + "total rail passenger", + "total rail freight", + ] + rail_ele = [ + "electricity rail", + "electricity rail passenger", + "electricity rail 
freight", + ] + + avia_inter = [ + 'total aviation passenger', + 'total aviation freight', + 'total international aviation passenger', + 'total international aviation freight', + 'total international aviation' + ] + avia_domestic = [ + 'total domestic aviation passenger', + 'total domestic aviation freight', + 'total domestic aviation', + ] + navigation = [ + "total domestic navigation", + ] + + for country in idees_countries: + res = ratio.loc[(country, slice(None), 'Residential')] + energy.loc[country, residential_total] *= res[['total']].iloc[0,0] + energy.loc[country, residential_ele] *= res[['ele']].iloc[0,0] + + ser = ratio.loc[(country, slice(None), 'Services')] + energy.loc[country, service_total] *= ser[['total']].iloc[0,0] + energy.loc[country, service_ele] *= ser[['ele']].iloc[0,0] + + agri = ratio.loc[(country, slice(None), 'Agriculture & forestry')] + energy.loc[country, agri_total] *= agri[['total']].iloc[0,0] + energy.loc[country, agri_ele] *= agri[['ele']].iloc[0,0] + + road = ratio.loc[(country, slice(None), 'Road')] + energy.loc[country, road_total] *= road[['total']].iloc[0,0] + energy.loc[country, road_ele] *= road[['ele']].iloc[0,0] + + rail = ratio.loc[(country, slice(None), 'Rail')] + energy.loc[country, rail_total] *= rail[['total']].iloc[0,0] + energy.loc[country, rail_ele] *= rail[['ele']].iloc[0,0] + + avi_d = ratio.loc[(country, slice(None), 'Domestic aviation')] + avi_i = ratio.loc[(country, 'International aviation', slice(None))] + energy.loc[country, avia_inter] *= avi_i[['total']].iloc[0,0] + energy.loc[country, avia_domestic] *= avi_d[['total']].iloc[0,0] + + nav = ratio.loc[(country, slice(None), 'Domestic Navigation')] + energy.loc[country, navigation] *= nav[['total']].iloc[0,0] + + return energy if __name__ == "__main__": if "snakemake" not in globals(): @@ -758,12 +938,22 @@ def build_transport_data(countries, population, idees): input_eurostat = snakemake.input.eurostat eurostat = build_eurostat(input_eurostat, countries, 
report_year, data_year) swiss = build_swiss(data_year) - idees = build_idees(idees_countries, data_year) + # data from idees only exists for 2015 + if data_year > 2015: + # read in latest data and rescale later + idees = build_idees(idees_countries, 2015) + else: + idees = build_idees(idees_countries, data_year) energy = build_energy_totals(countries, eurostat, swiss, idees) + + if data_year > 2015: + energy = rescale(idees_countries, energy, eurostat) + energy.to_csv(snakemake.output.energy_name) - district_heat_share = build_district_heat_share(countries, idees) + # use rescaled idees data to calculate district heat share + district_heat_share = build_district_heat_share(countries, energy.loc[idees_countries]) district_heat_share.to_csv(snakemake.output.district_heat_share) base_year_emissions = params["base_emissions_year"]