Linting refactor #880

Merged · 5 commits · Jan 19, 2024
Changes from 4 commits
6 changes: 3 additions & 3 deletions scripts/_benchmark.py
@@ -13,15 +13,15 @@
 import sys
 import time
 
-from memory_profiler import _get_memory, choose_backend
-
 logger = logging.getLogger(__name__)
 
 # TODO: provide alternative when multiprocessing is not available
 try:
     from multiprocessing import Pipe, Process
 except ImportError:
-    from multiprocessing.dummy import Process, Pipe
+    from multiprocessing.dummy import Pipe, Process
+
+from memory_profiler import _get_memory, choose_backend
 
 
 # The memory logging facilities have been adapted from memory_profiler
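
Note (not part of the diff): multiprocessing.dummy mirrors the multiprocessing API on top of threads, which is why the fallback import above leaves the rest of the script unchanged. A minimal, self-contained sketch of the pattern:

try:
    from multiprocessing import Pipe, Process  # real subprocesses
except ImportError:
    from multiprocessing.dummy import Pipe, Process  # thread-backed stand-ins


def _child(conn):
    conn.send("pong")  # reply through the pipe
    conn.close()


if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=_child, args=(child_conn,))
    p.start()
    print(parent_conn.recv())  # -> 'pong' under either backend
    p.join()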
27 changes: 21 additions & 6 deletions scripts/_helpers.py
@@ -15,8 +15,6 @@
 import pytz
 import requests
 import yaml
-from pypsa.components import component_attrs, components
-from pypsa.descriptors import Dict
 from tqdm import tqdm
 
 logger = logging.getLogger(__name__)
@@ -362,17 +360,34 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
     return week_df
 
 
-def parse(l):
-    return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}
+def parse(infix):
+    """
+    Recursively parse a chained wildcard expression into a dictionary or a YAML
+    object.
+
+    Parameters
+    ----------
+    infix : list
+        The list to parse.
+
+    Returns
+    -------
+    dict or YAML object
+        The parsed list.
+    """
+    if len(infix) == 1:
+        return yaml.safe_load(infix[0])
+    else:
+        return {infix.pop(0): parse(infix)}
 
 
 def update_config_with_sector_opts(config, sector_opts):
     from snakemake.utils import update_config
 
     for o in sector_opts.split("-"):
         if o.startswith("CF+"):
-            l = o.split("+")[1:]
-            update_config(config, parse(l))
+            infix = o.split("+")[1:]
+            update_config(config, parse(infix))
 
 
 def get_checksum_from_zenodo(file_url):
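
Note (not part of the diff): the refactored parse() folds a chained sector option into a nested dict, YAML-parsing the final element. A self-contained sketch with a hypothetical option string:

import yaml


def parse(infix):  # same logic as the helper above
    if len(infix) == 1:
        return yaml.safe_load(infix[0])
    return {infix.pop(0): parse(infix)}


opt = "CF+sector+co2+0.29"  # hypothetical chained wildcard option
infix = opt.split("+")[1:]  # -> ["sector", "co2", "0.29"]
print(parse(infix))  # -> {'sector': {'co2': 0.29}}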
14 changes: 6 additions & 8 deletions scripts/add_brownfield.py
@@ -8,17 +8,15 @@
 
 import logging
 
-logger = logging.getLogger(__name__)
-
-import pandas as pd
-
-idx = pd.IndexSlice
-
 import numpy as np
+import pandas as pd
 import pypsa
 from _helpers import update_config_with_sector_opts
 from add_existing_baseyear import add_build_year_to_new_assets
 
+logger = logging.getLogger(__name__)
+idx = pd.IndexSlice
+
 
 def add_brownfield(n, n_p, year):
     logger.info(f"Preparing brownfield for the year {year}")
@@ -121,7 +119,7 @@
 
 
 def disable_grid_expansion_if_LV_limit_hit(n):
-    if not "lv_limit" in n.global_constraints.index:
+    if "lv_limit" not in n.global_constraints.index:
         return
 
     total_expansion = (
@@ -133,7 +131,7 @@ def disable_grid_expansion_if_LV_limit_hit(n):
 
     # allow small numerical differences
     if lv_limit - total_expansion < 1:
-        logger.info(f"LV is already reached, disabling expansion and LV limit")
+        logger.info("LV is already reached, disabling expansion and LV limit")
         extendable_acs = n.lines.query("s_nom_extendable").index
         n.lines.loc[extendable_acs, "s_nom_extendable"] = False
         n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"]
10 changes: 5 additions & 5 deletions scripts/add_electricity.py
@@ -294,10 +294,10 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
     nuts3 = gpd.read_file(nuts3_shapes).set_index("index")
 
     def upsample(cntry, group):
-        l = opsd_load[cntry]
+        load = opsd_load[cntry]
 
         if len(group) == 1:
-            return pd.DataFrame({group.index[0]: l})
+            return pd.DataFrame({group.index[0]: load})
         nuts3_cntry = nuts3.loc[nuts3.country == cntry]
         transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
         gdp_n = pd.Series(
@@ -314,8 +314,8 @@ def upsample(cntry, group):
             # overwrite factor because nuts3 provides no data for UA+MD
             factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze())
         return pd.DataFrame(
-            factors.values * l.values[:, np.newaxis],
-            index=l.index,
+            factors.values * load.values[:, np.newaxis],
+            index=load.index,
             columns=factors.index,
         )
 
@@ -622,7 +622,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
         hydro.max_hours > 0, hydro.country.map(max_hours_country)
     ).fillna(6)
 
-    if flatten_dispatch := params.get("flatten_dispatch", False):
+    if params.get("flatten_dispatch", False):
         buffer = params.get("flatten_dispatch_buffer", 0.2)
         average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
         p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
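
Note (not part of the diff): the walrus binding in attach_hydro was dropped because flatten_dispatch was never read afterwards, so a plain params.get() suffices. The cap itself is the plant's average capacity factor plus a buffer, clipped at 1; a toy sketch with invented plant data:

import pandas as pd

inflow_t = pd.DataFrame({"plant_a": [30.0, 50.0], "plant_b": [5.0, 15.0]})  # inflow per step
p_nom = pd.Series({"plant_a": 100.0, "plant_b": 10.0})  # installed capacity
buffer = 0.2

average_capacity_factor = inflow_t.mean() / p_nom
p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
print(p_max_pu)  # plant_a 0.6, plant_b 1.0 (clipped)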
11 changes: 3 additions & 8 deletions scripts/add_existing_baseyear.py
@@ -8,25 +8,20 @@
 """
 
 import logging
-
-logger = logging.getLogger(__name__)
-
-import pandas as pd
-
-idx = pd.IndexSlice
-
 from types import SimpleNamespace
 
 import country_converter as coco
 import numpy as np
+import pandas as pd
 import pypsa
 import xarray as xr
 from _helpers import update_config_with_sector_opts
 from add_electricity import sanitize_carriers
 from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
 
+logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
 
+idx = pd.IndexSlice
 spatial = SimpleNamespace()
 
-
2 changes: 1 addition & 1 deletion scripts/build_biomass_potentials.py
@@ -9,11 +9,11 @@
 
 import logging
 
-logger = logging.getLogger(__name__)
 import geopandas as gpd
 import numpy as np
 import pandas as pd
 
+logger = logging.getLogger(__name__)
 AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
 
 
5 changes: 5 additions & 0 deletions scripts/build_biomass_transport_costs.py
@@ -80,4 +80,9 @@ def build_biomass_transport_costs():
 
 
 if __name__ == "__main__":
+    if "snakemake" not in globals():
+        from _helpers import mock_snakemake
+
+        snakemake = mock_snakemake("build_biomass_transport_costs")
+
     build_biomass_transport_costs()
2 changes: 1 addition & 1 deletion scripts/build_clustered_population_layouts.py
@@ -28,7 +28,7 @@
     gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
 )
 
-I = cutout.indicatormatrix(clustered_regions)
+I = cutout.indicatormatrix(clustered_regions)  # noqa: E741
 
 pop = {}
 for item in ["total", "urban", "rural"]:
4 changes: 2 additions & 2 deletions scripts/build_electricity_demand.py
@@ -41,13 +41,13 @@
 
 import logging
 
-logger = logging.getLogger(__name__)
 import dateutil
 import numpy as np
 import pandas as pd
 from _helpers import configure_logging
 from pandas import Timedelta as Delta
 
+logger = logging.getLogger(__name__)
 
 
 def load_timeseries(fn, years, countries, powerstatistics=True):
     """
5 changes: 1 addition & 4 deletions scripts/build_energy_totals.py
@@ -7,9 +7,6 @@
 """
 
 import logging
-
-logger = logging.getLogger(__name__)
-
 import multiprocessing as mp
 from functools import partial
 
@@ -21,7 +18,7 @@
 from tqdm import tqdm
 
 cc = coco.CountryConverter()
-
+logger = logging.getLogger(__name__)
 idx = pd.IndexSlice
 
 
19 changes: 11 additions & 8 deletions scripts/build_gas_input_locations.py
@@ -9,12 +9,12 @@
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import geopandas as gpd
 import pandas as pd
 from cluster_gas_network import load_bus_regions
 
+logger = logging.getLogger(__name__)
+
 
 def read_scigrid_gas(fn):
     df = gpd.read_file(fn)
@@ -27,8 +27,11 @@ def build_gem_lng_data(fn):
     df = pd.read_excel(fn[0], sheet_name="LNG terminals - data")
     df = df.set_index("ComboID")
 
-    remove_country = ["Cyprus", "Turkey"]
-    remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"]
+    remove_country = ["Cyprus", "Turkey"]  # noqa: F841
+    remove_terminal = [
+        "Puerto de la Luz LNG Terminal",
+        "Gran Canaria LNG Terminal",
+    ]  # noqa: F841
 
     df = df.query(
         "Status != 'Cancelled' \
@@ -45,8 +48,8 @@ def build_gem_prod_data(fn):
     df = pd.read_excel(fn[0], sheet_name="Gas extraction - main")
     df = df.set_index("GEM Unit ID")
 
-    remove_country = ["Cyprus", "Türkiye"]
-    remove_fuel_type = ["oil"]
+    remove_country = ["Cyprus", "Türkiye"]  # noqa: F841
+    remove_fuel_type = ["oil"]  # noqa: F841
 
     df = df.query(
         "Status != 'shut in' \
@@ -96,8 +99,8 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries):
     ]
 
     sto = read_scigrid_gas(sto_fn)
-    remove_country = ["RU", "UA", "TR", "BY"]
-    sto = sto.query("country_code != @remove_country")
+    remove_country = ["RU", "UA", "TR", "BY"]  # noqa: F841
+    sto = sto.query("country_code not in @remove_country")
 
     # production sites inside the model scope
     prod = build_gem_prod_data(gem_fn)
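
Note (not part of the diff): pandas' query() treats != against a list much like not in, so both spellings filter the same rows; not in simply states the membership test explicitly. The # noqa: F841 markers are needed because the variables are referenced only inside the query strings, where linters cannot see them. A toy sketch:

import pandas as pd

sto = pd.DataFrame({"country_code": ["DE", "RU", "FR", "UA"]})
remove_country = ["RU", "UA", "TR", "BY"]  # noqa: F841 (used via @ in the query)

print(sto.query("country_code not in @remove_country"))  # keeps DE and FR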
4 changes: 2 additions & 2 deletions scripts/build_gas_network.py
@@ -9,13 +9,13 @@
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import geopandas as gpd
 import pandas as pd
 from pypsa.geo import haversine_pts
 from shapely.geometry import Point
 
+logger = logging.getLogger(__name__)
+
 
 def diameter_to_capacity(pipe_diameter_mm):
     """
2 changes: 1 addition & 1 deletion scripts/build_heat_demand.py
@@ -34,7 +34,7 @@
     gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
 )
 
-I = cutout.indicatormatrix(clustered_regions)
+I = cutout.indicatormatrix(clustered_regions)  # noqa: E741
 
 pop_layout = xr.open_dataarray(snakemake.input.pop_layout)
 
8 changes: 3 additions & 5 deletions scripts/build_industrial_distribution_key.py
@@ -7,9 +7,6 @@
 """
 
 import logging
-
-logger = logging.getLogger(__name__)
-
 import uuid
 from itertools import product
 
@@ -18,6 +15,7 @@
 import pandas as pd
 from packaging.version import Version, parse
 
+logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
 
 
@@ -32,7 +30,7 @@ def locate_missing_industrial_sites(df):
     try:
         from geopy.extra.rate_limiter import RateLimiter
         from geopy.geocoders import Nominatim
-    except:
+    except ImportError:
         raise ModuleNotFoundError(
             "Optional dependency 'geopy' not found."
             "Install via 'conda install -c conda-forge geopy'"
@@ -101,7 +99,7 @@ def prepare_hotmaps_database(regions):
     # get all duplicated entries
     duplicated_i = gdf.index[gdf.index.duplicated()]
     # convert from raw data country name to iso-2-code
-    code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
+    code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")  # noqa: F841
    # screen out malformed country allocation
     gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
     # concat not duplicated and filtered gdf
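
Note (not part of the diff): replacing the bare except: narrows the handler to the one failure it is written for, so unrelated errors propagate. A sketch of the pattern; the explicit from e chaining is an extra refinement beyond the diff, and the added trailing space avoids the "found.Install" concatenation in the original message:

try:
    from geopy.geocoders import Nominatim  # optional dependency
except ImportError as e:
    raise ModuleNotFoundError(
        "Optional dependency 'geopy' not found. "  # trailing space before concatenation
        "Install via 'conda install -c conda-forge geopy'"
    ) from e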
6 changes: 2 additions & 4 deletions scripts/build_industrial_production_per_country.py
@@ -7,18 +7,16 @@
 """
 
 import logging
-from functools import partial
-
-logger = logging.getLogger(__name__)
-
 import multiprocessing as mp
+from functools import partial
 
 import country_converter as coco
 import numpy as np
 import pandas as pd
 from _helpers import mute_print
 from tqdm import tqdm
 
+logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
 
 tj_to_ktoe = 0.0238845
3 changes: 1 addition & 2 deletions scripts/build_line_rating.py
@@ -50,7 +50,6 @@
 the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated.
 """
 
-import logging
 import re
 
 import atlite
@@ -99,7 +98,7 @@ def calculate_line_rating(n, cutout):
     -------
     xarray DataArray object with maximal power.
     """
-    relevant_lines = n.lines[(n.lines["underground"] == False)]
+    relevant_lines = n.lines[~n.lines["underground"]]
     buses = relevant_lines[["bus0", "bus1"]].values
     x = n.buses.x
     y = n.buses.y
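
Note (not part of the diff): the calculate_line_rating change replaces an equality comparison against False (the E712 lint rule) with the idiomatic pandas boolean inversion; both select the non-underground lines. A toy sketch:

import pandas as pd

lines = pd.DataFrame({"underground": [True, False, False]}, index=["l1", "l2", "l3"])

relevant = lines[~lines["underground"]]  # idiomatic boolean mask
print(relevant.index.tolist())  # -> ['l2', 'l3']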