diff --git a/.gitignore b/.gitignore index 3909265b..a55300e2 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ gurobi.log /data/switzerland* /data/.nfs* /data/Industrial_Database.csv +/data/retro/tabula-calculator-calcsetbuilding.csv *.org diff --git a/Snakefile b/Snakefile index 0e077caf..c8b1d65e 100644 --- a/Snakefile +++ b/Snakefile @@ -281,14 +281,15 @@ rule build_industrial_demand: rule build_retro_cost: input: building_stock="data/retro/data_building_stock.csv", + data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", + air_temperature = "resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", u_values_PL="data/retro/u_values_poland.csv", tax_w="data/retro/electricity_taxes_eu.csv", construction_index="data/retro/comparative_level_investment.csv", - average_surface="data/retro/average_surface_components.csv", floor_area_missing="data/retro/floor_area_missing.csv", clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", cost_germany="data/retro/retro_cost_germany.csv", - window_assumptions="data/retro/window_assumptions.csv" + window_assumptions="data/retro/window_assumptions.csv", output: retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv", floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv" diff --git a/config.default.yaml b/config.default.yaml index 6b4a20f3..74a61b85 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -119,20 +119,25 @@ sector: 'time_dep_hp_cop' : True #time dependent heat pump coefficient of performance 'heat_pump_sink_T' : 55. # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py # conservatively high to cover hot water and space heating in poorly-insulated buildings - 'retrofitting' : - 'retro_exogen': True # space heat demand savings exogenously - 'dE': # reduction of space heat demand (applied before losses in DH) - 2020 : 0. - 2030 : 0.15 - 2040 : 0.3 - 2050 : 0.4 + 'reduce_space_heat_exogenously': True # reduces space heat demand by a given factor (applied before losses in DH) + # this can represent e.g. building renovation, building demolition, or if + # the factor is negative: increasing floor area, increased thermal comfort, population growth + 'reduce_space_heat_exogenously_factor': # per unit reduction in space heat demand + # the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221 + 2020: 0.10 # this results in a space heat demand reduction of 10% + 2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita + 2030: 0.09 + 2035: 0.11 + 2040: 0.16 + 2045: 0.21 + 2050: 0.29 + 'retrofitting' : # co-optimises building renovation to reduce space heat demand 'retro_endogen': False # co-optimise space heat savings - 'cost_factor' : 1.0 + 'cost_factor' : 1.0 # weight costs for building renovation 'interest_rate': 0.04 # for investment in building components 'annualise_cost': True # annualise the investment costs 'tax_weighting': False # weight costs depending on taxes in countries - 'construction_index': True # weight costs depending on labour/material costs per ct - 'l_strength': ["0.076", "0.197"] # additional insulation thickness[m], determines number of retro steps(=generators per bus) and maximum possible savings + 'construction_index': True # weight costs depending on labour/material costs per country 'tes' : True 'tes_tau' : 3. 
'boilers' : True diff --git a/data/retro/average_surface_components.csv b/data/retro/average_surface_components.csv deleted file mode 100644 index de72edde..00000000 --- a/data/retro/average_surface_components.csv +++ /dev/null @@ -1,7 +0,0 @@ -,Dwelling,Ceilling,Standard component surfaces (m2),component,surfaces,(m2),, -Building type,Space(m²),Height(m),Roof,Facade,Floor,Windows,, -Single/two family house,120,2.5,90,166,63,29,, -Large apartment house,1457,2.5,354,1189,354,380,, -Apartment house,5276,,598.337,2992.1,598.337,756,tabula ,http://webtool.building-typology.eu/#pdfes -,,,,,,,, -"Source: https://link.springer.com/article/10.1007/s12053-010-9090-6 ,p.4",,,,,,,, diff --git a/doc/data.csv b/doc/data.csv index e8c19518..8e316281 100644 --- a/doc/data.csv +++ b/doc/data.csv @@ -22,5 +22,5 @@ U-values Poland,u_values_poland.csv,unknown,https://data.europa.eu/euodp/de/data Floor area missing in hotmaps building stock data,floor_area_missing.csv,unknown,https://data.europa.eu/euodp/de/data/dataset/building-stock-observatory Comparative level investment,comparative_level_investment.csv,Eurostat,https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Comparative_price_levels_for_investment Electricity taxes,electricity_taxes_eu.csv,Eurostat,https://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=nrg_pc_204&lang=en -Average surface components,average_surface_components.csv,unknown,http://webtool.building-typology.eu/#bm +Building topologies and corresponding standard values,tabula-calculator-calcsetbuilding.csv,unknown,https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unkown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/ diff --git a/doc/installation.rst b/doc/installation.rst index 728061c7..3ab3d328 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -73,8 +73,8 @@ To download and extract the data bundle on the command line: .. code:: bash - projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210125.tar.gz" - projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210125.tar.gz + projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210418.tar.gz" + projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210418.tar.gz The data licences and sources are given in the following table. diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 56c65669..a9561857 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -6,6 +6,7 @@ Future release =================== * For the myopic investment option, a carbon budget and a type of decay (exponential or beta) can be selected in the ``config.yaml`` file to distribute the budget across the ``planning_horizons``. For example, ``cb40ex0`` in the ``{sector_opts}`` wildcard will distribute a carbon budget of 40 GtCO2 following an exponential decay with initial growth rate 0. +* The cost database for retrofitting of the thermal envelope of buildings has been updated. Now, for calculating the space heat savings of a building, losses by thermal bridges and ventilation are included as well as heat gains (internal and by solar radiation). See the section :ref:`retro` for more details on the retrofitting module. * Added an option to alter the capital cost or maximum capacity of carriers by a factor via ``carrier+factor`` in the ``{sector_opts}`` wildcard. 
This can be useful for exploring uncertain cost parameters. Example: ``solar+c0.5`` reduces the ``capital_cost`` of solar to 50\% of original values. Similarly ``solar+p3`` multiplies the ``p_nom_max`` by 3. * Rename the bus for European liquid hydrocarbons from ``Fischer-Tropsch`` to ``EU oil``, since it can be supplied not just with the Fischer-Tropsch process, but also with fossil oil. * Bugfix: The new separation of land transport by carrier in Version 0.4.0 failed to account for the carbon dioxide emissions from internal combustion engines. This is now treated as a negative load on the atmospheric carbon dioxide bus, just like aviation emissions. @@ -137,4 +138,4 @@ To make a new release of the data bundle, make an archive of the files in ``data .. code:: bash - data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv + data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv retro/tabula-calculator-calcsetbuilding.csv
diff --git a/doc/supply_demand.rst b/doc/supply_demand.rst index 002bd16c..77317094 100644 --- a/doc/supply_demand.rst +++ b/doc/supply_demand.rst @@ -108,6 +108,43 @@ Small for decentral applications. Big water pit storage for district heating. +.. _retro: + +Retrofitting of the thermal envelope of buildings +=================================================== +Co-optimising building renovation is only enabled if the option +``retro_endogen: True`` is set in the ``config.yaml``. To reduce the computational burden, +the default setting is + +.. literalinclude:: ../config.default.yaml + :language: yaml + :lines: 134-135 + +Renovation of the thermal envelope reduces the space heating demand and is +optimised at each node for every heat bus. The considered renovation measures are additional +insulation material and the replacement of energy-inefficient windows. + +In a first step, costs per energy savings are estimated in :mod:`build_retro_cost.py`. +They depend on the insulation condition of the building stock and the costs for +renovating the building elements. +In a second step, two possible renovation strengths are determined from these costs +per energy saving: a moderate renovation with lower costs and lower +maximum possible space heat savings, and an ambitious renovation with +higher costs and higher efficiency gains. Both are added by step-wise +linearisation in the form of two additional generators in +:mod:`prepare_sector_network.py`. + +Settings in the ``config.yaml`` concerning the endogenous optimisation of building +renovation: + +.. literalinclude:: ../config.default.yaml + :language: yaml + :lines: 136-140 + +Further information is given in the publication + +`Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) `_.
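+For illustration, the choice between the two renovation strengths can be sketched as
+follows. This is a minimal example with made-up numbers; in :mod:`build_retro_cost.py`
+the same logic is applied per country and sector, with the moderate step chosen as the
+insulation thickness with the lowest cost per unit of energy saved and the ambitious
+step as the strongest insulation thickness (``"0.26"`` m in the script's ``l_strength`` list).
+
+.. code:: python
+
+    import pandas as pd
+
+    # hypothetical cost-energy-savings points for one node:
+    # dE   = remaining space heat demand per unit of the unrefurbished state
+    # cost = annualised retrofitting cost, indexed by insulation thickness [m]
+    steps = pd.DataFrame({"dE": [0.90, 0.75, 0.55, 0.45],
+                          "cost": [30., 60., 130., 200.]},
+                         index=["0.07", "0.1", "0.15", "0.26"])
+
+    # cost per unit of energy saved
+    cost_per_saving = steps["cost"] / (1 - steps["dE"])
+
+    moderate = cost_per_saving.idxmin()   # cheapest savings -> moderate renovation
+    ambitious = "0.26"                    # strongest insulation -> ambitious renovation
+    print(steps.loc[[moderate, ambitious]])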
+ Hydrogen demand ================== diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index 9b34507e..cb0acf41 100644 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -1,134 +1,142 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ -Created on Mon Jan 20 14:57:21 2020 +Created on Fri Jan 22 10:36:39 2021 -@author: bw0928 +This script should calculate the space heating savings through better +insulation of the thermal envelope of a building and corresponding costs for +different building types in different countries. -***************************************************************************** -This script calculates cost-energy_saving-curves for retrofitting -for the EU-37 countries, based on the building stock data from hotmaps and -the EU building stock database -***************************************************************************** +-----------------METHODOLOGY ------------------------------------------------ +The energy savings calculations are based on the -Structure: + EN ISO 13790 / seasonal method https://www.iso.org/obp/ui/#iso:std:iso:13790:ed-2:v1:en: - (1) set assumptions and parameters - (2) read and prepare data - (3) calculate (€-dE-curves) - (4) save in csv + - calculations heavily oriented on the TABULAWebTool + http://webtool.building-typology.eu/ + http://www.episcope.eu/fileadmin/tabula/public/docs/report/TABULA_CommonCalculationMethod.pdf + which is following the EN ISO 13790 / seasonal method -***************************************************************************** -""" + - building stock data: + mainly: hotmaps project https://gitlab.com/hotmaps/building-stock + missing: EU building observatory https://ec.europa.eu/energy/en/eu-buildings-database -import pandas as pd -import matplotlib.pyplot as plt + - building types with typical surfaces/ standard values: + - tabula https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx -#%% ************ FUCNTIONS *************************************************** -# windows --------------------------------------------------------------- -def window_limit(l, window_assumptions): - """ - define limit u value from which on window is retrofitted - """ - m = (window_assumptions.diff()["u_limit"] / - window_assumptions.diff()["strength"]).dropna().iloc[0] - a = window_assumptions["u_limit"][0] - m * window_assumptions["strength"][0] - return m*l + a +---------------------BASIC EQUAIONS ------------------------------------------- +The basic equations: -def u_retro_window(l, window_assumptions): - """ - define retrofitting value depending on renovation strength - """ - m = (window_assumptions.diff()["u_value"] / - window_assumptions.diff()["strength"]).dropna().iloc[0] - a = window_assumptions["u_value"][0] - m * window_assumptions["strength"][0] - return max(m*l + a, 0.8) + The Energy needed for space heating E_space [W/m²] are calculated as the + sum of heat losses and heat gains: -def window_cost(u, cost_retro, window_assumptions): - """ - get costs for new windows depending on u value + E_space = H_losses - H_gains - """ - m = (window_assumptions.diff()["cost"] / - window_assumptions.diff()["u_value"]).dropna().iloc[0] - a = window_assumptions["cost"][0] - m * window_assumptions["u_value"][0] - window_cost = m*u + a - if annualise_cost: - window_cost = window_cost * interest_rate / (1 - (1 + interest_rate) - ** -cost_retro.loc["Windows", "life_time"]) - return window_cost + Heat losses constitute from the losses through heat trasmission (H_tr [W/m²K]) + 
(this includes heat transfer through building elements and thermal bridges) + and losses by ventilation (H_ve [W/m²K]): -# functions for intermediate steps (~l, ~area) ----------------------------- -def calculate_new_u(u_values, l, l_weight, k=0.035): - """ - calculate U-values after building retrofitting, depending on the old - U-values (u_values). - They depend for the components Roof, Wall, Floor on the additional - insulation thickness (l), and the weighting for the corresponding - component (l_weight). - Windows are renovated to new ones with U-value (function: u_retro_window(l)) - only if the are worse insulated than a certain limit value - (function: window_limit). + H_losses = (H_tr + H_ve) * F_red * (T_threshold - T_averaged_d_heat) * d_heat * 1/365 - Parameters - ---------- - u_values: pd.DataFrame - l: string - l_weight: pd.DataFrame (component, weight) - k: thermal conductivity + F_red : reduction factor, considering non-uniform heating [°C], p.16 chapter 2.6 [-] + T_threshold : heating temperature threshold, assumed 15 C + d_heat : Length of heating season, number of days with daily averaged temperature below T_threshold + T_averaged_d_heat : mean daily averaged temperature of the days within heating season d_heat - """ - return u_values.apply(lambda x: - k / ((k / x.value) + - (float(l) * l_weight.loc[x.type][0])) - if x.type!="Windows" - else (min(x.value, u_retro_window(float(l), window_assumptions)) - if x.value>window_limit(float(l), window_assumptions) else x.value), - axis=1) + Heat gains constitute from the gains by solar radiation (H_solar) and + internal heat gains (H_int) weighted by a gain utilisation factor nu: -def calculate_dE(u_values, l, average_surface_w): - """ - returns energy demand after retrofit (per unit of unrefurbished energy - demand) depending on current and retrofitted U-values, this energy demand - is weighted depending on the average surface of each component for the - building type of the assumend subsector - """ - return u_values.apply(lambda x: x[l] / x.value * - average_surface_w.loc[x.assumed_subsector, - x.type], - axis=1) + H_gains = nu * (H_solar + H_int) + +---------------- STRUCTURE OF THE SCRIPT -------------------------------------- +The script has the following structure: -def calculate_costs(u_values, l, cost_retro, average_surface): + (i) fixed parameters are set + (ii) functions + (1) prepare data, bring to same format + (2) calculate space heat demand depending on additional insulation material + (3) calculate costs for corresponding additional insulation material + (4) get cost savings per retrofitting measures for each sector by weighting + with heated floor area + +------------------------------------------------------------------------------- +@author: Lisa +""" +import pandas as pd +import xarray as xr + +# (i) --- FIXED PARAMETER / STANDARD VALUES ----------------------------------- + +# thermal conductivity standard value +k = 0.035 +# strenght of relative retrofitting depending on the component +# determined by historical data of insulation thickness for retrofitting +l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.]}, + index=["Roof", "Wall", "Floor"]) + +# standard room height [m], used to calculate heat transfer by ventilation +h_room = 2.5 +# volume specific heat capacity air [Wh/m^3K] +c_p_air = 0.34 +# internal heat capacity per m² A_c_ref [Wh/(m^2K)] +c_m = 45 +# average thermal output of the internal heat sources per m^2 reference area [W/m^2] +phi_int = 3 +# constant parameter tau_H_0 [h] according to EN 13790 
seasonal method +tau_H_0 = 30 +# constant parameter alpha_H_0 [-] according to EN 13790 seasonal method +alpha_H_0 = 0.8 + +# paramter for solar heat load during heating season ------------------------- +# tabular standard values table p.8 in documenation +external_shading = 0.6 # vertical orientation: fraction of window area shaded [-] +frame_area_fraction = 0.3 # fraction of frame area of window [-] +non_perpendicular = 0.9 # reduction factor, considering radiation non perpendicular to the glazing[-] +solar_energy_transmittance = 0.5 # solar energy transmiitance for radiation perpecidular to the glazing [-] +# solar global radiation [kWh/(m^2a)] +solar_global_radiation = pd.Series([246, 401, 246, 148], + index=["east", "south", "west", "north"], + name="solar_global_radiation [kWh/(m^2a)]") + +# threshold temperature for heating [Celsius] -------------------------------- +t_threshold = 15 + +# rename sectors +# rename residential sub sectors +rename_sectors = {'Single family- Terraced houses': "SFH", + 'Multifamily houses': "MFH", + 'Appartment blocks': "AB"} + + +# additional insulation thickness, determines maximum possible savings [m] +l_strength = [ + "0.07","0.075", "0.08", "0.1", "0.15", + "0.22", "0.24", "0.26" + ] + + +# (ii) --- FUNCTIONS ---------------------------------------------------------- + +def get_average_temperature_during_heating_season(temperature, t_threshold=15): """ - returns costs for a given retrofitting strength weighted by the average - surface/volume ratio of the component for each building type + returns average temperature during heating season + input: + temperature : pd.Series(Index=time, values=temperature) + t_threshold : threshold temperature for heating degree days (HDD) + returns: + average temperature """ - return u_values.apply(lambda x: (cost_retro.loc[x.type, "cost_var"] * - 100 * float(l) * l_weight.loc[x.type][0] - + cost_retro.loc[x.type, "cost_fix"]) * - average_surface.loc[x.assumed_subsector, x.type] / - average_surface.loc[x.assumed_subsector, "surface"] - if x.type!="Windows" - else (window_cost(x[l], cost_retro, window_assumptions) * - average_surface.loc[x.assumed_subsector, x.type] / - average_surface.loc[x.assumed_subsector, "surface"] - if x.value>window_limit(float(l), window_assumptions) else 0), - axis=1) + t_average_daily = temperature.resample("1D").mean() + return t_average_daily.loc[t_average_daily < t_threshold].mean() -# --------------------------------------------------------------------------- def prepare_building_stock_data(): """ reads building stock data and cleans up the format, returns -------- u_values: pd.DataFrame current U-values - average_surface: pd.DataFrame (index= building type, - columns = [surface [m],height [m], - components area [m^2]]) - average_surface_w: pd.DataFrame weighted share of the components per - building type area_tot: heated floor area per country and sector [Mm²] area: heated floor area [Mm²] for country, sector, building type and period @@ -140,11 +148,14 @@ def prepare_building_stock_data(): # standardize data building_data["type"].replace( - {'Covered area: heated [Mm²]': 'Heated area [Mm²]', - 'Windows ': 'Windows', - 'Walls ': 'Walls', - 'Roof ': 'Roof', - 'Floor ': 'Floor'}, inplace=True) + {'Covered area: heated [Mm²]': 'Heated area [Mm²]', + 'Windows ': 'Window', + 'Windows': 'Window', + 'Walls ': 'Wall', + 'Walls': 'Wall', + 'Roof ': 'Roof', + 'Floor ': 'Floor', + }, inplace=True) building_data.country_code = building_data.country_code.str.upper() 
building_data["subsector"].replace({'Hotels and Restaurants': @@ -152,6 +163,7 @@ def prepare_building_stock_data(): building_data["sector"].replace({'Residential sector': 'residential', 'Service sector': 'services'}, inplace=True) + # extract u-values u_values = building_data[(building_data.feature.str.contains("U-values")) & (building_data.subsector != "Total")] @@ -209,6 +221,8 @@ def prepare_building_stock_data(): # u_values for Poland are missing -> take them from eurostat ----------- u_values_PL = pd.read_csv(snakemake.input.u_values_PL) + u_values_PL.component.replace({"Walls":"Wall", "Windows": "Window"}, + inplace=True) area_PL = area.loc["Poland"].reset_index() data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index) data_PL["country"] = "Poland" @@ -233,41 +247,152 @@ def prepare_building_stock_data(): # smallest possible today u values for windows 0.8 (passive house standard) # maybe the u values for the glass and not the whole window including frame # for those types assumed in the dataset - u_values.loc[(u_values.type=="Windows") & (u_values.value<0.8), "value"] = 0.8 + u_values.loc[(u_values.type=="Window") & (u_values.value<0.8), "value"] = 0.8 # drop unnecessary columns u_values.drop(['topic', 'feature','detail', 'estimated','unit'], axis=1, inplace=True, errors="ignore") + + + u_values = u_values.apply(lambda x: x.replace(rename_sectors)) + + # for missing weighting of surfaces of building types assume MFH + u_values["assumed_subsector"] = u_values.subsector + u_values.loc[~u_values.subsector.isin(rename_sectors.values()), + "assumed_subsector"] = 'MFH' + + u_values.country_code.replace({"UK":"GB"}, inplace=True) + u_values.bage.replace({'Berfore 1945':'Before 1945'}, inplace=True) + u_values = u_values[~u_values.bage.isna()] + + u_values.set_index(["country_code", "subsector", "bage", "type"], + inplace=True) + # only take in config.yaml specified countries into account countries = ct_total.index area_tot = area_tot.loc[countries] - # average component surface -------------------------------------------------- - average_surface = (pd.read_csv(snakemake.input.average_surface, - nrows=3, - header=1, - index_col=0).rename( - {'Single/two family house': 'Single family- Terraced houses', - 'Large apartment house': 'Multifamily houses', - 'Apartment house': 'Appartment blocks'}, - axis="index")).iloc[:, :6] - average_surface.columns = ["surface", "height", "Roof", - "Walls", "Floor", "Windows"] - # get area share of component - average_surface_w = average_surface[components].apply(lambda x: x / x.sum(), - axis=1) + return u_values, country_iso_dic, countries, area_tot, area - return (u_values, average_surface, - average_surface_w, area_tot, area, country_iso_dic, countries) -def prepare_cost_retro(): +def prepare_building_topology(u_values, same_building_topology=True): + """ + reads in typical building topologies (e.g. 
average surface of building elements) + and typical losses trough thermal bridging and air ventilation + """ + + data_tabula = pd.read_csv(snakemake.input.data_tabula, + skiprows=lambda x: x in range(1,11), + low_memory=False).iloc[:2974] + + parameters = ["Code_Country", + # building type (SFH/MFH/AB) + "Code_BuildingSizeClass", + # time period of build year + "Year1_Building", "Year2_Building", + # areas [m^2] + "A_C_Ref", # conditioned area, internal + "A_Roof_1", "A_Roof_2", "A_Wall_1", "A_Wall_2", + "A_Floor_1", "A_Floor_2", "A_Window_1", "A_Window_2", + # for air ventilation loses [1/h] + "n_air_use", "n_air_infiltration", + # for losses due to thermal bridges, standard values [W/(m^2K)] + "delta_U_ThermalBridging", + # floor area related heat transfer coefficient by transmission [-] + "F_red_temp", + # refurbishment state [1: not refurbished, 2: moderate ,3: strong refurbishment] + 'Number_BuildingVariant', + ] + + data_tabula = data_tabula[parameters] + + building_elements = ["Roof", "Wall", "Floor", "Window"] + + # get total area of building components + for element in building_elements: + elements = ["A_{}_1".format(element), + "A_{}_2".format(element)] + data_tabula = pd.concat([data_tabula.drop(elements, axis=1), + data_tabula[elements].sum(axis=1).rename("A_{}".format(element))], + axis=1) + + # clean data + data_tabula = data_tabula.loc[pd.concat([data_tabula[col]!=0 for col in + ["A_Wall", "A_Floor", "A_Window", "A_Roof", "A_C_Ref"]], + axis=1).all(axis=1)] + data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1,2,3])] + data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(["AB", "SFH", "MFH", "TH"])] + + + + # map tabula building periods to hotmaps building periods + def map_periods(build_year1, build_year2): + periods = {(0, 1945): 'Before 1945', + (1945,1969) : '1945 - 1969', + (1970, 1979) :'1970 - 1979', + (1980, 1989) : '1980 - 1989', + (1990, 1999) :'1990 - 1999', + (2000, 2010) : '2000 - 2010', + (2010, 10000) : 'Post 2010'} + minimum = 1e5 + for key in periods: + diff = abs(build_year1-key[0]) + abs(build_year2-key[1]) + if diff < minimum: + minimum = diff + searched_period = periods[key] + return searched_period + + data_tabula["bage"] = data_tabula.apply(lambda x: map_periods(x.Year1_Building, x.Year2_Building), + axis=1) + + # set new index + data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass', + 'bage', 'Number_BuildingVariant']) + + # get typical building topology + area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window'] + typical_building = (data_tabula.groupby(level=[1,2]).mean() + .rename(index={"TH": "SFH"}).groupby(level=[0,1]).mean()) + + # drop duplicates + data_tabula = data_tabula[~data_tabula.index.duplicated(keep="first")] + + # fill missing values + hotmaps_data_i = u_values.reset_index().set_index(["country_code", "assumed_subsector", + "bage"]).index + # missing countries in tabular + missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique()) + # areas should stay constant for different retrofitting measures + cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref','A_Roof', + 'A_Wall', 'A_Floor', 'A_Window'] + for col in cols_constant: + missing_ct[col] = missing_ct[col].combine_first(missing_ct[col] + .groupby(level=[0,1,2]).mean()) + missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack() + .unstack().mean()) + data_tabula = missing_ct.stack(level=[-1,-2, -3],dropna=False) + + # sets for different countries same building topology which only depends 
on + # build year and subsector (MFH, SFH, AB) + if same_building_topology: + typical_building = ((typical_building.reindex(data_tabula.droplevel(0).index)) + .set_index(data_tabula.index)) + data_tabula.update(typical_building[area_cols]) + + # total buildings envelope surface [m^2] + data_tabula["A_envelope"] = data_tabula[["A_{}".format(element) for + element in building_elements]].sum(axis=1) + + return data_tabula + + +def prepare_cost_retro(country_iso_dic): """ read and prepare retro costs, annualises them if annualise_cost=True """ cost_retro = pd.read_csv(snakemake.input.cost_germany, nrows=4, index_col=0, usecols=[0, 1, 2, 3]) - cost_retro.index = cost_retro.index.str.capitalize() - cost_retro.rename(index={"Window": "Windows", "Wall": "Walls"}, inplace=True) + cost_retro.rename(lambda x: x.capitalize(), inplace=True) window_assumptions = pd.read_csv(snakemake.input.window_assumptions, skiprows=[1], usecols=[0,1,2,3], nrows=2) @@ -279,83 +404,425 @@ def prepare_cost_retro(): ** -cost_retro.loc[x.index, "life_time"]))) - return cost_retro, window_assumptions + # weightings of costs --------------------------------------------- + if construction_index: + cost_w = pd.read_csv(snakemake.input.construction_index, + skiprows=3, nrows=32, index_col=0) + # since German retrofitting costs are assumed + cost_w = ((cost_w["2018"] / cost_w.loc["Germany", "2018"]) + .rename(index=country_iso_dic)) + else: + cost_w = None + + if tax_weighting: + tax_w = pd.read_csv(snakemake.input.tax_w, + header=12, nrows=39, index_col=0, usecols=[0, 4]) + tax_w.rename(index=country_iso_dic, inplace=True) + tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[:, 0] + tax_w.dropna(inplace=True) + else: + tax_w = None + + + return cost_retro, window_assumptions, cost_w, tax_w -def calculate_cost_energy_curve(u_values, l_strength, l_weight, average_surface_w, - average_surface, area, country_iso_dic, - countries): +def prepare_temperature_data(): """ - returns energy demand per unit of unrefurbished (dE) and cost for given - renovation strength (l_strength), data for missing countries is - approximated by countries with similar building stock (dict:map_for_missings) + returns the temperature dependent data for each country: + + d_heat : length of heating season pd.Series(index=countries) [days/year] + on those days, daily average temperature is below + threshold temperature t_threshold + temperature_factor : accumulated difference between internal and + external temperature pd.Series(index=countries) ([K]) * [days/year] + + temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365 - parameter - -------- input ----------- - u_values: pd.DataFrame current U-values - l_strength: list of strings (strength of retrofitting) - l_weight: pd.DataFrame (component, weight) - average_surface: pd.DataFrame (index= building type, - columns = [surface [m],height [m], - components area [m^2]]) - average_surface_w: pd.DataFrame weighted share of the components per - building type - area: heated floor area [Mm²] for country, sector, building - type and period - country_iso_dic: dict (maps country name to 2-letter-iso-code) - countries: pd.Index (specified countries in config.yaml) - -------- output ---------- - res: pd.DataFrame(index=pd.MultiIndex([country, sector]), - columns=pd.MultiIndex([(dE/cost), l_strength])) """ + temperature = xr.open_dataarray(snakemake.input.air_temperature).T.to_pandas() + d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean() + 
.resample("1D").mean()window_limit(float(l), window_assumptions) else 0), + axis=1) + + +def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): + """ + calculate U-values after building retrofitting, depending on the old + U-values (u_values). This is for simple insulation measuers, adding + an additional layer of insulation. + They depend for the components Roof, Wall, Floor on the additional + insulation thickness (l), and the weighting for the corresponding + component (l_weight). + + Windows are renovated to new ones with U-value (function: u_retro_window(l)) + only if the are worse insulated than a certain limit value + (function: window_limit). + + Parameters + ---------- + u_values: pd.DataFrame + l: string + l_weight: pd.DataFrame (component, weight) + k: thermal conductivity + + """ + return u_values.apply(lambda x: + k / ((k / x.value) + + (float(l) * l_weight.loc[x.name[3]])) + if x.name[3]!="Window" + else (min(x.value, u_retro_window(float(l), window_assumptions)) + if x.value>window_limit(float(l), window_assumptions) else x.value), + axis=1) + + +def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix): + """ + maps tabula data to hotmaps data with wished column name prefix + + Parameters + ---------- + df_tabula : pd.Series + tabula data with pd.MultiIndex + df_hotmaps : pd.DataFrame + dataframe with hotmaps pd.MultiIndex + column_prefix : string + column prefix to rename column names of df_tabula + + Returns + ------- + pd.DataFrame (index=df_hotmaps.index) + returns df_tabula with hotmaps index + + """ + values = (df_tabula.unstack() + .reindex(df_hotmaps.rename(index = + lambda x: "MFH" if x not in rename_sectors.values() + else x, level=1).index)) + values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns]) + values.index = df_hotmaps.index + return values + + +def get_solar_gains_per_year(window_area): + """ + returns solar heat gains during heating season in [kWh/a] depending on + the window area [m^2] of the building, assuming a equal distributed window + orientation (east, south, north, west) + """ + return sum(external_shading * frame_area_fraction * non_perpendicular + * 0.25 * window_area * solar_global_radiation) + + +def map_to_lstrength(l_strength, df): + """ + renames column names from a pandas dataframe to map tabula retrofitting + strengths [2 = moderate, 3 = ambitious] to l_strength + """ + middle = len(l_strength) // 2 + map_to_l = pd.MultiIndex.from_arrays([middle*[2] + len(l_strength[middle:])*[3],l_strength]) + l_strength_df = (df.stack(-2).reindex(map_to_l, axis=1, level=0) + .droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1)) + return pd.concat([df.drop([2,3], axis=1, level=1), l_strength_df], axis=1) + + +def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor): + """ + calculates total annual heat losses Q_ht for different insulation thiknesses + (l_strength), depening on current insulation state (u_values), standard + building topologies and air ventilation from TABULA (data_tabula) and + the accumulated difference between internal and external temperature + during the heating season (temperature_factor). 
+ + Total annual heat losses Q_ht constitute from losses by: + (1) transmission (H_tr_e) + (2) thermal bridges (H_tb) + (3) ventilation (H_ve) + weighted by a factor (F_red_temp) which is taken account for non-uniform heating + and the temperature factor of the heating season + + Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K] + + returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'], + columns=[current (1.) + retrofitted (l_strength)]) + + """ + # (1) by transmission + # calculate new U values of building elements due to additional insulation for l in l_strength: - u_values[l] = calculate_new_u(u_values, l, l_weight) - energy_saved = pd.concat([energy_saved, - calculate_dE(u_values, l, average_surface_w).rename(l)], - axis=1) - costs = pd.concat([costs, - calculate_costs(u_values, l, cost_retro, average_surface).rename(l)], - axis=1) + u_values["new_U_{}".format(l)] = calculate_new_u(u_values, + l, l_weight, window_assumptions) + # surface area of building components [m^2] + area_element = (data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]] + .rename(columns=lambda x: x[2:]).stack().unstack(-2).stack()) + u_values["A_element"] = map_tabula_to_hotmaps(area_element, + u_values, "A_element").xs(1, level=1, axis=1) + + # heat transfer H_tr_e [W/m^2K] through building element + # U_e * A_e / A_C_Ref + columns = ["value"] + ["new_U_{}".format(l) for l in l_strength] + heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0), + u_values.A_element], axis=1) + # get real subsector back in index + heat_transfer.index = u_values.index + heat_transfer = heat_transfer.groupby(level=[0,1,2]).sum() + + # rename columns of heat transfer H_tr_e [W/K] and envelope surface A_envelope [m^2] + heat_transfer.rename(columns={"A_element":"A_envelope", + },inplace=True) + + # map reference area + heat_transfer["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, + heat_transfer, + "A_C_Ref").xs(1.,level=1,axis=1) + u_values["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, + u_values, + "A_C_Ref").xs(1.,level=1,axis=1) + + # get heat transfer by transmission through building element [W/(m^2K)] + heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0) + heat_transfer_perm2.columns = pd.MultiIndex.from_product([["H_tr_e"], [1.] + l_strength]) + + # (2) heat transfer by thermal bridges H_tb [W/(m^2K)] + # H_tb = delta_U [W/(m^2K)]* A_envelope [m^2] / A_C_Ref [m^2] + H_tb_tabula = data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope / data_tabula.A_C_Ref + heat_transfer_perm2 = pd.concat([heat_transfer_perm2, + map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, "H_tb")], axis=1) + + + # (3) by ventilation H_ve [W/(m²K)] + # = c_p_air [Wh/(m^3K)] * (n_air_use + n_air_infilitraion) [1/h] * h_room [m] + H_ve_tabula = (data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air * h_room + heat_transfer_perm2 = pd.concat([heat_transfer_perm2, + map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, "H_ve")], + axis=1) + + + # F_red_temp factor which is taken account for non-uniform heating e.g. 
+ # lower heating/switch point during night times/weekends + # effect is significant for buildings with poor insulation + # for well insulated buildings/passive houses it has nearly no effect + # based on tabula values depending on the building type + F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp, + heat_transfer_perm2, + "F_red_temp") + # total heat transfer Q_ht [W/m^2] = + # (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K] + # temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365 + heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2) + F_red_temp = map_to_lstrength(l_strength, F_red_temp) + + Q_ht = (heat_transfer_perm2.groupby(level=1,axis=1).sum() + .mul(F_red_temp.droplevel(0, axis=1)) + .mul(temperature_factor.reindex(heat_transfer_perm2.index,level=0), axis=0)) + + return Q_ht, heat_transfer_perm2 + + +def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat): + """ + calculates heat gains Q_gain [W/m^2], which consititure from gains by: + (1) solar radiation + (2) internal heat gains + + """ + # (1) by solar radiation H_solar [W/m^2] + # solar radiation [kWhm^2/a] / A_C_Ref [m^2] *1e3[1/k] / 8760 [a/h] + H_solar = (data_tabula.A_Window.apply(lambda x: get_solar_gains_per_year(x)) + / data_tabula.A_C_Ref * 1e3 / 8760) + + Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, "H_solar").xs(1.,level=1, axis=1) + + # (2) by internal H_int + # phi [W/m^2] * d_heat [d/a] * 1/365 [a/d] -> W/m^2 + Q_gain["H_int"] = (phi_int * d_heat * 1/365).reindex(index=heat_transfer_perm2.index, level=0) + + return Q_gain + +def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain): + """ + calculates gain utilisation factor nu + """ + # time constant of the building tau [h] = c_m [Wh/(m^2K)] * 1 /(H_tr_e+H_tb*H_ve) [m^2 K /W] + tau = c_m / heat_transfer_perm2.groupby(level=1,axis=1).sum() + alpha = alpha_H_0 + (tau/tau_H_0) + # heat balance ratio + gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0) + # gain utilisation factor + nu = (1 - gamma**alpha) / (1 - gamma**(alpha+1)) + + return nu + + +def calculate_space_heat_savings(u_values, data_tabula, l_strength, + temperature_factor, d_heat): + """ + calculates space heat savings (dE_space [per unit of unrefurbished state]) + through retrofitting of the thermal envelope by additional insulation + material (l_strength[m]) + """ + # heat losses Q_ht [W/m^2] + Q_ht, heat_transfer_perm2 = calculate_heat_losses(u_values, data_tabula, + l_strength, temperature_factor) + # heat gains Q_gain [W/m^2] + Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat) + + # calculate gain utilisation factor nu [dimensionless] + nu = calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain) + + # total space heating demand E_space + E_space = Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0) + dE_space = E_space.div(E_space[1.], axis=0).iloc[:, 1:] + dE_space.columns = pd.MultiIndex.from_product([["dE"], l_strength]) + + return dE_space + + +def calculate_retro_costs(u_values, l_strength, cost_retro): + """ + returns costs of different retrofitting measures + """ + costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l) + for l in l_strength], axis=1) # energy and costs per country, sector, subsector and year - e_tot = energy_saved.groupby(['country', 'sector', 'subsector', 'bage']).sum() - cost_tot = costs.groupby(['country', 'sector', 'subsector', 'bage']).sum() - - # weighting by area -> energy and costs per 
country and sector - # in case of missing data first concat - energy_saved = pd.concat([e_tot, area.weight], axis=1) - cost_res = pd.concat([cost_tot, area.weight], axis=1) - energy_saved = (energy_saved.apply(lambda x: x * x.weight, axis=1) - .groupby(level=[0, 1]).sum()) - cost_res = (cost_res.apply(lambda x: x * x.weight, axis=1) - .groupby(level=[0, 1]).sum()) - - res = pd.concat([energy_saved[l_strength], cost_res[l_strength]], - axis=1, keys=["dE", "cost"]) - res.rename(index=country_iso_dic, inplace=True) - - res = res.reindex(index=countries, level=0) - # reset index because otherwise not considered countries still in index.levels[0] - res = res.reset_index().set_index(["country", "sector"]) + cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum() + cost_tot.columns = pd.MultiIndex.from_product([["cost"], cost_tot.columns]) + + return cost_tot + + +def sample_dE_costs_area(area, area_tot, costs, dE_space, countries, + construction_index, tax_weighting): + """ + bring costs and energy savings together, fill area and costs per energy + savings for missing countries, weight costs, + determine "moderate" and "ambitious" retrofitting + """ + sub_to_sector_dict = (area.reset_index().replace(rename_sectors) + .set_index("subsector")["sector"].to_dict()) + + area_reordered = ((area.rename(index=country_iso_dic, level=0) + .rename(index=rename_sectors, level=2) + .reset_index()).rename(columns={"country":"country_code"}) + .set_index(["country_code", "subsector", "bage"])) + + cost_dE =(pd.concat([costs, dE_space], axis=1) + .mul(area_reordered.weight, axis=0) + .rename(sub_to_sector_dict,level=1).groupby(level=[0,1]).sum()) # map missing countries - for ct in pd.Index(map_for_missings.keys()).intersection(countries): - averaged_data = res.reindex(index=map_for_missings[ct], level=0).mean(level=1) - index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()]) - averaged_data.index = index - if ct not in res.index.levels[0]: - res = res.append(averaged_data) - else: - res.loc[averaged_data.index] = averaged_data + for ct in countries.difference(cost_dE.index.levels[0]): + averaged_data = (cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1) + .set_index(pd.MultiIndex + .from_product([[ct], cost_dE.index.levels[1]]))) + cost_dE = cost_dE.append(averaged_data) - return res + # weights costs after construction index + if construction_index: + for ct in list(map_for_missings.keys() - cost_w.index): + cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean() + cost_dE.cost = cost_dE.cost.mul(cost_w, level=0, axis=0) -# %% **************** MAIN ************************************************ + # weights cost depending on country taxes + if tax_weighting: + for ct in list(map_for_missings.keys() - tax_w.index): + tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean() + cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0) + + # drop not considered countries + cost_dE = cost_dE.reindex(countries,level=0) + # get share of residential and sevice floor area + sec_w = area_tot.value / area_tot.value.groupby(level=0).sum() + # get the total cost-energy-savings weight by sector area + tot = (cost_dE.mul(sec_w, axis=0).groupby(level="country_code").sum() + .set_index(pd.MultiIndex + .from_product([cost_dE.index.unique(level="country_code"), ["tot"]]))) + cost_dE = cost_dE.append(tot).unstack().stack() + + summed_area = (pd.DataFrame(area_tot.groupby("country").sum()) + .set_index(pd.MultiIndex.from_product( + 
[area_tot.index.unique(level="country"), ["tot"]]))) + area_tot = area_tot.append(summed_area).unstack().stack() + + + + cost_per_saving = (cost_dE["cost"] / (1-cost_dE["dE"])) #.diff(axis=1).dropna(axis=1) + + + moderate_min = cost_per_saving.idxmin(axis=1) + moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1) + for i in moderate_min.index], axis=1).T + moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns, + ["moderate"]]) + + ambitious_dE_cost = cost_dE.xs("0.26", level=1,axis=1) + ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns, + ["ambitious"]]) + + cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1) + + return cost_dE_new, area_tot + + +#%% --- MAIN -------------------------------------------------------------- if __name__ == "__main__": # for testing if 'snakemake' not in globals(): @@ -366,16 +833,17 @@ def calculate_cost_energy_curve(u_values, l_strength, l_weight, average_surface_ wildcards=dict( network='elec', simpl='', - clusters='37', + clusters='48', lv='1', opts='Co2L-3H', sector_opts="[Co2L0p0-168H-T-H-B-I]"), input=dict( building_stock="data/retro/data_building_stock.csv", + data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", u_values_PL="data/retro/u_values_poland.csv", + air_temperature = "resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", tax_w="data/retro/electricity_taxes_eu.csv", construction_index="data/retro/comparative_level_investment.csv", - average_surface="data/retro/average_surface_components.csv", floor_area_missing="data/retro/floor_area_missing.csv", clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", cost_germany="data/retro/retro_cost_germany.csv", @@ -387,21 +855,13 @@ def calculate_cost_energy_curve(u_values, l_strength, l_weight, average_surface_ with open('config.yaml', encoding='utf8') as f: snakemake.config = yaml.safe_load(f) -# ******** (1) ASSUMPTIONS - PARAMETERS ********************************** +# ******** config ********************************************************* + retro_opts = snakemake.config["sector"]["retrofitting"] interest_rate = retro_opts["interest_rate"] annualise_cost = retro_opts["annualise_cost"] # annualise the investment costs tax_weighting = retro_opts["tax_weighting"] # weight costs depending on taxes in countries construction_index = retro_opts["construction_index"] # weight costs depending on labour/material costs per ct - # additional insulation thickness, determines maximum possible savings - l_strength = retro_opts["l_strength"] - - k = 0.035 # thermal conductivity standard value - # strenght of relative retrofitting depending on the component - # determined by historical data of insulation thickness for retrofitting - l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.]}, - index=["Roof", "Walls", "Floor"]) - # mapping missing countries by neighbours map_for_missings = { @@ -414,72 +874,31 @@ def calculate_cost_energy_curve(u_values, l_strength, l_weight, average_surface_ "NO": ["SE"], } -# %% ************ (2) DATA *************************************************** +# (1) prepare data ********************************************************** # building stock data ----------------------------------------------------- - (u_values, average_surface, average_surface_w, - area_tot, area, country_iso_dic, countries) = prepare_building_stock_data() - + # hotmaps u_values, heated floor areas per sector + u_values, country_iso_dic, countries, area_tot, area = 
prepare_building_stock_data() + # building topology, thermal bridges, ventilation losses + data_tabula = prepare_building_topology(u_values) # costs for retrofitting ------------------------------------------------- - cost_retro, window_assumptions = prepare_cost_retro() + cost_retro, window_assumptions, cost_w, tax_w = prepare_cost_retro(country_iso_dic) + # temperature dependend parameters + d_heat, temperature_factor = prepare_temperature_data() - # weightings of costs - if construction_index: - cost_w = pd.read_csv(snakemake.input.construction_index, - skiprows=3, nrows=32, index_col=0) - # since German retrofitting costs are assumed - cost_w = ((cost_w["2018"] / cost_w.loc["Germany", "2018"]) - .rename(index=country_iso_dic)) - if tax_weighting: - tax_w = pd.read_csv(snakemake.input.tax_w, - header=12, nrows=39, index_col=0, usecols=[0, 4]) - tax_w.rename(index=country_iso_dic, inplace=True) - tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[:, 0] - tax_w.dropna(inplace=True) - -# %% ********** (3) CALCULATE COST-ENERGY-CURVES **************************** +# (2) space heat savings **************************************************** + dE_space = calculate_space_heat_savings(u_values, data_tabula, l_strength, + temperature_factor, d_heat) - # for missing weighting of surfaces of building types assume MultiFamily houses - u_values["assumed_subsector"] = u_values.subsector - u_values.loc[~u_values.subsector.isin(average_surface.index), - "assumed_subsector"] = 'Multifamily houses' +# (3) costs ***************************************************************** + costs = calculate_retro_costs(u_values, l_strength, cost_retro) - dE_and_cost = calculate_cost_energy_curve(u_values, l_strength, l_weight, - average_surface_w, average_surface, area, - country_iso_dic, countries) - # reset index because otherwise not considered countries still in index.levels[0] - dE_and_cost = dE_and_cost.reset_index().set_index(["country", "sector"]) +# (4) cost-dE and area per sector ******************************************* + cost_dE, area_tot = sample_dE_costs_area(area, area_tot, costs, dE_space, countries, + construction_index, tax_weighting) - # weights costs after construction index - if construction_index: - for ct in list(map_for_missings.keys() - cost_w.index): - cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean() - dE_and_cost.cost = dE_and_cost.cost.apply(lambda x: x * cost_w[x.index.levels[0]]) - - # weights cost depending on country taxes - if tax_weighting: - for ct in list(map_for_missings.keys() - tax_w.index): - tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean() - dE_and_cost.cost = dE_and_cost.cost.apply(lambda x: x * tax_w[x.index.levels[0]]) - - # get share of residential and sevice floor area - sec_w = (area_tot / area_tot.groupby(["country"]).sum())["value"] - # get the total cost-energy-savings weight by sector area - tot = dE_and_cost.apply(lambda col: col * sec_w, axis=0).groupby(level=0).sum() - tot.set_index(pd.MultiIndex.from_product([list(tot.index), ["tot"]]), - inplace=True) - dE_and_cost = dE_and_cost.append(tot).unstack().stack() - - summed_area = pd.DataFrame(area_tot.groupby("country").sum()) - summed_area.set_index(pd.MultiIndex.from_product( - [list(summed_area.index), ["tot"]]), inplace=True) - area_tot = area_tot.append(summed_area).unstack().stack() - -# %% ******* (4) SAVE ************************************************ - - dE_and_cost.to_csv(snakemake.output.retro_cost) +# save 
********************************************************************* + cost_dE.to_csv(snakemake.output.retro_cost) area_tot.to_csv(snakemake.output.floor_area) - - diff --git a/scripts/make_summary.py b/scripts/make_summary.py index 1b12ee6d..518874ef 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -196,25 +196,25 @@ def calculate_costs(n,label,costs): return costs -def calculate_cumulative_cost(): +def calculate_cumulative_cost(): planning_horizons = snakemake.config['scenario']['planning_horizons'] cumulative_cost = pd.DataFrame(index = df["costs"].sum().index, columns=pd.Series(data=np.arange(0,0.1, 0.01), name='social discount rate')) - + #discount cost and express them in money value of planning_horizons[0] for r in cumulative_cost.columns: cumulative_cost[r]=[df["costs"].sum()[index]/((1+r)**(index[-1]-planning_horizons[0])) for index in cumulative_cost.index] - + #integrate cost throughout the transition path - for r in cumulative_cost.columns: + for r in cumulative_cost.columns: for cluster in cumulative_cost.index.get_level_values(level=0).unique(): for lv in cumulative_cost.index.get_level_values(level=1).unique(): for sector_opts in cumulative_cost.index.get_level_values(level=2).unique(): cumulative_cost.loc[(cluster, lv, sector_opts,'cumulative cost'),r] = np.trapz(cumulative_cost.loc[idx[cluster, lv, sector_opts,planning_horizons],r].values, x=planning_horizons) - return cumulative_cost - + return cumulative_cost + def calculate_nodal_capacities(n,label,nodal_capacities): #Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): @@ -285,7 +285,7 @@ def calculate_supply(n,label,supply): for c in n.iterate_components(n.one_port_components): - items = c.df.index[c.df.bus.map(bus_map)] + items = c.df.index[c.df.bus.map(bus_map).fillna(False)] if len(items) == 0: continue @@ -330,7 +330,7 @@ def calculate_supply_energy(n,label,supply_energy): for c in n.iterate_components(n.one_port_components): - items = c.df.index[c.df.bus.map(bus_map)] + items = c.df.index[c.df.bus.map(bus_map).fillna(False)] if len(items) == 0: continue @@ -611,7 +611,7 @@ def to_csv(df): print(networks_dict) Nyears = 1 - + costs_db = prepare_costs(snakemake.input.costs, snakemake.config['costs']['USD2013_to_EUR2013'], snakemake.config['costs']['discountrate'], @@ -623,10 +623,9 @@ def to_csv(df): df["metrics"].loc["total costs"] = df["costs"].sum() to_csv(df) - + if snakemake.config["foresight"]=='myopic': cumulative_cost=calculate_cumulative_cost() cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv') - \ No newline at end of file diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 185ca06e..f30fad57 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -248,6 +248,14 @@ def remove_elec_base_techs(n): n.carriers.drop(to_remove, inplace=True, errors="ignore") +def remove_non_electric_buses(n): + """ + remove buses from pypsa-eur with carriers which are not AC buses + """ + print("drop buses from PyPSA-Eur with carrier: ", n.buses[~n.buses.carrier.isin(["AC", "DC"])].carrier.unique()) + n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])] + + def add_co2_tracking(n): @@ -1174,11 +1182,10 @@ def add_heat(network): urban_fraction = 
options['central_fraction']*pop_layout["urban"]/(pop_layout[["urban","rural"]].sum(axis=1)) - # building retrofitting, exogenously reduce space heat demand - if options["retrofitting"]["retro_exogen"]: - dE = get_parameter(options["retrofitting"]["dE"]) - print("retrofitting exogenously, assumed space heat reduction of ", - dE) + # exogenously reduce space heat demand + if options["reduce_space_heat_exogenously"]: + dE = get_parameter(options["reduce_space_heat_exogenously_factor"]) + print("assumed space heat reduction of {} %".format(dE*100)) for sector in sectors: heat_demand[sector + " space"] = (1-dE)*heat_demand[sector + " space"] @@ -1908,8 +1915,7 @@ def get_parameter(item): retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv", floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv" ), - output=['results/version-cb48be3/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] - + output=['results/version-cb48be3/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] ) import yaml with open('config.yaml', encoding='utf8') as f: @@ -1949,6 +1955,8 @@ def get_parameter(item): n.loads["carrier"] = "electricity" + remove_non_electric_buses(n) + n.buses["location"] = n.buses.index update_wind_solar_costs(n, costs)
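
The updated :mod:`build_retro_cost.py` in this changeset computes the space heat demand with the EN ISO 13790 seasonal method described in its docstring. The following minimal sketch evaluates that heat balance for a single hypothetical building: the constants are those set in the script, while all other input values are made up for illustration only.

.. code:: python

    # constants as set in build_retro_cost.py
    c_m = 45          # internal heat capacity [Wh/(m^2 K)]
    tau_H_0 = 30      # time constant parameter [h], EN 13790 seasonal method
    alpha_H_0 = 0.8   # parameter [-], EN 13790 seasonal method
    t_threshold = 15  # heating threshold temperature [C]

    # hypothetical inputs for a single building (made-up values)
    H_tr_e, H_tb, H_ve = 1.1, 0.1, 0.4  # transmission, thermal bridges, ventilation [W/(m^2 K)]
    F_red_temp = 0.95                   # reduction factor for non-uniform heating [-]
    d_heat = 220                        # length of the heating season [days/a]
    t_avg_d_heat = 5.0                  # mean daily temperature during the heating season [C]
    Q_gain = 1.5                        # solar plus internal heat gains [W/m^2]

    # accumulated temperature difference over the heating season
    temperature_factor = (t_threshold - t_avg_d_heat) * d_heat / 365

    # heat losses Q_ht [W/m^2]
    Q_ht = (H_tr_e + H_tb + H_ve) * F_red_temp * temperature_factor

    # gain utilisation factor nu
    tau = c_m / (H_tr_e + H_tb + H_ve)   # building time constant [h]
    alpha = alpha_H_0 + tau / tau_H_0
    gamma = Q_gain / Q_ht                # heat balance ratio
    nu = (1 - gamma**alpha) / (1 - gamma**(alpha + 1))

    # space heating demand E_space [W/m^2]
    E_space = Q_ht - nu * Q_gain
    print(f"space heat demand: {E_space:.2f} W/m^2")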