Skip to content

Commit

Permalink
Merge pull request #817 from davide-f/run_world_latest
Browse files Browse the repository at this point in the history
Run world latest
  • Loading branch information
davide-f authored Aug 5, 2023
2 parents 9fd2af6 + 30a8ca7 commit fcb6668
Show file tree
Hide file tree
Showing 7 changed files with 69 additions and 38 deletions.
2 changes: 2 additions & 0 deletions doc/release_notes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ E.g. if a new rule becomes available describe how to use it `snakemake -j1 run_t

* Add NorthAmerican and Earth cutouts, and improve African cutout `PR #813 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/813>`__

* Bug fixes to restore Africa execution and improve performance `PR #817 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/817>`__

PyPSA-Earth 0.2.2
=================

Expand Down
6 changes: 3 additions & 3 deletions scripts/_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@
import numpy as np
import pandas as pd

# list of recognised NaN values ("NA" and "na" excluded, as they may be confused with Namibia's two-letter country code)
NA_VALUES = ["NULL", "", "N/A", "NAN", "NaN", "nan", "Nan", "n/a", "null"]

REGION_COLS = ["geometry", "name", "x", "y", "country"]


Expand Down Expand Up @@ -572,9 +575,6 @@ def country_name_2_two_digits(country_name):
return full_name


NA_VALUES = ["NULL", "", "N/A", "NAN", "NaN", "nan", "Nan", "n/a", "na", "null"]


def read_csv_nafix(file, **kwargs):
"Function to open a csv as pandas file and standardize the na value"
if "keep_default_na" not in kwargs:
Expand Down
4 changes: 3 additions & 1 deletion scripts/base_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ def _find_closest_links(links, new_links, distance_upper_bound=1.5):

def _load_buses_from_osm(fp_buses, config):
buses = (
read_csv_nafix(fp_buses)
read_csv_nafix(fp_buses, dtype=dict(bus_id="str", voltage="float"))
.set_index("bus_id")
.drop(["station_id"], axis=1)
.rename(columns=dict(voltage="v_nom"))
Expand Down Expand Up @@ -198,6 +198,8 @@ def _load_lines_from_osm(fp_osm_lines, config, buses):
bus1="str",
underground="bool",
under_construction="bool",
voltage="float",
circuits="float",
),
)
.set_index("line_id")
Expand Down
17 changes: 14 additions & 3 deletions scripts/build_powerplants.py
Original file line number Diff line number Diff line change
Expand Up @@ -280,18 +280,29 @@ def replace_natural_gas_fueltype(df):
filepath_osm_ppl = snakemake.input.osm_powerplants
filepath_osm2pm_ppl = snakemake.output.powerplants_osm2pm

csv_pm = convert_osm_to_pm(filepath_osm_ppl, filepath_osm2pm_ppl)

n = pypsa.Network(snakemake.input.base_network)
countries_codes = n.buses.country.unique()
countries_names = list(map(two_digits_2_name_country, countries_codes))

config["target_countries"] = countries_names

if "EXTERNAL_DATABASE" in config:
if (
"EXTERNAL_DATABASE"
in config["matching_sources"] + config["fully_included_sources"]
):
if "EXTERNAL_DATABASE" not in config:
logger.error(
"Missing configuration EXTERNAL_DATABASE in powerplantmatching config yaml\n\t"
"Please check file configs/powerplantmatching_config.yaml"
)
logger.info("Parsing OSM generator data to powerplantmatching format")
config["EXTERNAL_DATABASE"]["fn"] = os.path.join(
os.getcwd(), filepath_osm2pm_ppl
)
else:
# create an empty file
with open(filepath_osm2pm_ppl, "w"):
pass

# specify the main query for filtering powerplants
ppl_query = snakemake.config["electricity"]["powerplants_filter"]
Expand Down
59 changes: 37 additions & 22 deletions scripts/clean_osm_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,12 @@ def prepare_substation_df(df_all_substations):
if c not in df_all_substations:
df_all_substations[c] = np.nan

df_all_substations = df_all_substations[clist]
df_all_substations.drop(
df_all_substations.columns[~df_all_substations.columns.isin(clist)],
axis=1,
inplace=True,
errors="ignore",
)

return df_all_substations

Expand Down Expand Up @@ -196,8 +201,13 @@ def filter_voltage(df, threshold_voltage=35000):
# convert voltage to int
df["voltage"] = df["voltage"].astype(int)

# keep only lines with a voltage no lower than threshold_voltage
df = df[df.voltage >= threshold_voltage]
# drop lines with a voltage lower than threshold_voltage
df.drop(
df[df.voltage < threshold_voltage].index,
axis=0,
inplace=True,
errors="ignore",
)

return df

Expand Down Expand Up @@ -295,7 +305,12 @@ def prepare_lines_df(df_lines):
if c not in df_lines:
df_lines[c] = np.nan

df_lines = df_lines[clist]
df_lines.drop(
df_lines.columns[~df_lines.columns.isin(clist)],
axis=1,
inplace=True,
errors="ignore",
)

return df_lines

Expand Down Expand Up @@ -776,23 +791,27 @@ def set_countryname_by_shape(
return df


def create_extended_country_shapes(country_shapes, offshore_shapes, tolerance=0.01):
    """
    Obtain the extended country shape by merging on- and off-shore shapes.

    Parameters
    ----------
    country_shapes : geopandas.GeoSeries
        Onshore country geometries, indexed by country code.
    offshore_shapes : mapping of country code -> geometry
        Offshore geometries keyed by the same country codes; countries
        without an offshore entry keep their onshore geometry unchanged.
    tolerance : float, optional (default 0.01)
        Buffer distance applied to every merged geometry to slightly
        enlarge it so features on the exact boundary are still captured.
        # NOTE(review): presumably in the CRS units of country_shapes
        # (degrees for EPSG:4326) — confirm against callers.

    Returns
    -------
    geopandas.GeoSeries
        Buffered on+offshore geometry per country, indexed by name.
    """

    merged_shapes = (
        gpd.GeoDataFrame(
            {
                "name": list(country_shapes.index),
                "geometry": [
                    # union with the offshore shape when one exists,
                    # otherwise keep the onshore geometry as-is
                    c_geom.unary_union(offshore_shapes[c_code])
                    if c_code in offshore_shapes
                    else c_geom
                    for c_code, c_geom in country_shapes.items()
                ],
            },
            crs=country_shapes.crs,
        )
        .set_index("name")["geometry"]
        .buffer(tolerance)
    )

    return merged_shapes

Expand Down Expand Up @@ -876,11 +895,7 @@ def clean_data(
logger.info("Select lines and cables in the region of interest")

# drop lines crossing regions with and without the region under interest
df_all_lines = df_all_lines[
df_all_lines.apply(
lambda x: africa_shape.contains(x.geometry.boundary), axis=1
)
]
df_all_lines = df_all_lines[df_all_lines.geometry.boundary.within(africa_shape)]

df_all_lines = gpd.GeoDataFrame(df_all_lines, geometry="geometry")

Expand Down
10 changes: 5 additions & 5 deletions scripts/config_osm_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,10 +222,10 @@
"BI": "burundi",
"CM": "cameroon",
# "IC": "canary-islands", # Island
# "CV": "cape-verde",
"CV": "cape-verde",
"CF": "central-african-republic",
"TD": "chad",
# "KM": "comoros",
"KM": "comoros",
"CG": "congo-brazzaville",
"CD": "congo-democratic-republic",
"DJ": "djibouti",
Expand Down Expand Up @@ -257,10 +257,10 @@
# "RE": "reunion", # Island
"RW": "rwanda",
# saint-helena-ascension-and-tristan-da-cunha # Islands
# "ST": "sao-tome-and-principe",
"ST": "sao-tome-and-principe",
"SN": "senegal",
"GM": "gambia",
# "SC": "seychelles",
"SC": "seychelles",
"SL": "sierra-leone",
"SO": "somalia", # No Data
# south-africa-and-lesotho
Expand Down Expand Up @@ -390,7 +390,7 @@
# "GG": "guernsey", # Island
"HU": "hungary",
"IS": "iceland",
# "IE": "ireland-and-northern-ireland",
"IE": "ireland-and-northern-ireland",
# "IM": "isle of man", # Island
"IT": "italy",
# "JE": "jersey", # Island
Expand Down
9 changes: 5 additions & 4 deletions scripts/simplify_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,12 +161,13 @@ def _prepare_connection_costs_per_link(n, costs, config):

connection_costs_per_link = {}

if not n.links.loc[n.links.carrier == "DC"].empty:
dc_lengths = n.links.length
unterwater_fractions = n.links.underwater_fraction
elif not n.lines.loc[n.lines.carrier == "DC"].empty:
# initialize dc_lengths and underwater_fractions by the hvdc_as_lines option
if config["electricity"]["hvdc_as_lines"]:
dc_lengths = n.lines.length
unterwater_fractions = n.lines.underwater_fraction
else:
dc_lengths = n.links.length
unterwater_fractions = n.links.underwater_fraction

for tech in config["renewable"]:
if tech.startswith("offwind"):
Expand Down

0 comments on commit fcb6668

Please sign in to comment.